hexsha
stringlengths 40
40
| size
int64 3
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
972
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
972
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
972
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 3
1.03M
| avg_line_length
float64 1.13
941k
| max_line_length
int64 2
941k
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d045dd7d8065e060ae46feca61a4cacbd267a31a
| 201
|
py
|
Python
|
DLplatform/parameters/__init__.py
|
chelseajohn/dlplatform
|
429e42c598039d1e9fd1df3da4247f391915a31b
|
[
"Apache-2.0"
] | 5
|
2020-05-05T08:54:26.000Z
|
2021-02-20T07:36:28.000Z
|
DLplatform/parameters/__init__.py
|
zagazao/dlplatform
|
ab32af8f89cfec4b478203bd5d13ce2d30e89ba7
|
[
"Apache-2.0"
] | 1
|
2020-11-16T14:15:53.000Z
|
2020-11-16T14:15:53.000Z
|
DLplatform/parameters/__init__.py
|
zagazao/dlplatform
|
ab32af8f89cfec4b478203bd5d13ce2d30e89ba7
|
[
"Apache-2.0"
] | 4
|
2020-05-05T08:56:57.000Z
|
2020-07-22T11:28:52.000Z
|
from DLplatform.parameters.parameters import Parameters
#from DLplatform.parameters.kerasNNParameters import KerasNNParameters
#from DLplatform.parameters.pyTorchNNParameters import PyTorchNNParameters
| 67
| 74
| 0.905473
|
967eb0aaa9fa8310c4ad1c881f8c0c263bc36ad8
| 998
|
py
|
Python
|
modules/explore_anot.py
|
garthee/gnot
|
ac698c5cce0e2ebb77dd84c5d050417b735f5fbf
|
[
"MIT"
] | 3
|
2015-01-29T10:11:10.000Z
|
2016-04-11T05:32:23.000Z
|
modules/explore_anot.py
|
garthee/gnot
|
ac698c5cce0e2ebb77dd84c5d050417b735f5fbf
|
[
"MIT"
] | 1
|
2015-01-10T17:52:06.000Z
|
2015-01-10T20:50:16.000Z
|
modules/explore_anot.py
|
garthee/gnot
|
ac698c5cce0e2ebb77dd84c5d050417b735f5fbf
|
[
"MIT"
] | 4
|
2015-01-29T11:56:31.000Z
|
2018-04-12T01:48:11.000Z
|
#!/usr/bin/python
from werkzeug.wrappers import Response
from db import export_sql
def render(vis, request, info):
    """Export selected annotation columns from a SQL table and stream the file.

    Reads the table name, column names and paging options from the request
    query string, builds a SELECT statement, delegates the export to
    ``export_sql`` and, when that helper reports no error, returns the
    exported data file as the HTTP response.
    """
    info["message"] = []
    # Table/column identifiers are taken verbatim from the query string.
    table = request.args.get("table", '')
    Date = request.args.get("Date", '')
    TargetField = request.args.get("TargetField", '')
    ShortText = request.args.get("ShortText", '')
    Text = request.args.get("Text", '')
    where = request.args.get("where", '1=1')  # free-form filter clause
    reload = int(request.args.get("reload", 0))  # non-zero forces a re-export
    view = request.args.get("view", '')
    start = request.args.get("start", '0')  # start at 0
    limit = request.args.get("limit", '100000')
    # SECURITY: every interpolated value comes straight from the request, so
    # this query is vulnerable to SQL injection; identifiers should be
    # whitelisted and values parameterized before production use.
    sql = "select '%s',%s,%s,%s from %s where %s order by 2 limit %s offset %s" % (
        TargetField, Date, ShortText, Text, table, where, limit, start)
    header = "TargetField,Date,ShortText,Text"
    (datfile, reload, result) = export_sql(sql, vis.config, reload, header, view)
    # ``result`` carries an error message; empty means the export succeeded.
    if len(result) == 0:
        return Response(open(datfile, 'r'))
    # NOTE(review): no explicit return on the error path here — the original
    # file may continue beyond this visible chunk; verify against the repo.
| 30.242424
| 84
| 0.602204
|
4e9ea11817fec8a376768e36fa2405a5c53fe545
| 3,768
|
py
|
Python
|
submission_form/views/user_create.py
|
NAKKA-K/dw2018_server
|
63d74b1206860d0d2213efbc8a7969be7976c4fd
|
[
"MIT"
] | null | null | null |
submission_form/views/user_create.py
|
NAKKA-K/dw2018_server
|
63d74b1206860d0d2213efbc8a7969be7976c4fd
|
[
"MIT"
] | 6
|
2018-02-08T12:26:04.000Z
|
2018-02-09T06:14:12.000Z
|
submission_form/views/user_create.py
|
NAKKA-K/dw2018_server
|
63d74b1206860d0d2213efbc8a7969be7976c4fd
|
[
"MIT"
] | null | null | null |
from django.views.generic.base import TemplateView, View
from django.contrib.sites.shortcuts import get_current_site
from django.urls import reverse, reverse_lazy
from django.http import Http404
from django.contrib import messages
from django.shortcuts import render, redirect
from django.db import transaction
from submission_form.views.LoginRequiredMessageMixin import LoginRequiredMessageMixin
from submission_form.models import Organization
from submission_form.forms import CustomUserCreationForm, TeacherForm, StudentForm
import hashlib
class LinkUserCreateView(TemplateView):
    """Show the teacher/student sign-up invitation links for the session's
    organization. Only accessible to users flagged as teachers."""

    template_name = 'link_to_user_create.html'

    def get(self, request, **kwargs):
        # Non-teachers (or sessions without the flag) get a 404.
        if request.session.get('is_teacher', False) == False:
            raise Http404
        return super().get(request, **kwargs)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        org = self.request.session['user_info']['org']
        # Absolute base URL for the user-creation view.
        scheme = 'https' if self.request.is_secure() else 'http'
        base_url = "{}://{}".format(scheme, get_current_site(self.request).domain)
        # One invitation link per role; the role is baked into a sha224 hash
        # of the organization id so the link encodes who it creates.
        for role, context_key in (('teacher', 'teacher_url'), ('student', 'student_url')):
            digest = hashlib.sha224(org.encode('utf-8'))
            digest.update(role.encode('utf-8'))
            context[context_key] = "{}{}".format(
                base_url,
                reverse('user_create', kwargs = {
                    'uuid': org,
                    'uuid_hash': digest.hexdigest()
                })
            )
        return context
class UserCreateView(View):
    """Create a user account (teacher or student) from a hashed invite URL.

    The URL carries the organization id and a sha224 hash that encodes the
    role; ``is_teacher_create`` resolves the role or raises Http404.
    """

    template_name = 'user_create.html'
    success_url = reverse_lazy('index')

    def get(self, request, **kwargs):
        # Pick the profile form matching the role encoded in the URL hash.
        if self.is_teacher_create(self.kwargs['uuid'], self.kwargs['uuid_hash']):
            user_info_form = TeacherForm()
        else:
            user_info_form = StudentForm()
        data = {
            'user_form': CustomUserCreationForm(),
            'user_info_form': user_info_form,
        }
        return render(request, self.template_name, data)

    @transaction.atomic
    def post(self, request, **kwargs):
        user_form = CustomUserCreationForm(request.POST)
        # BUG FIX: the forms were swapped relative to get() — a teacher URL
        # must bind TeacherForm and a student URL StudentForm, otherwise the
        # submitted data is validated/saved against the wrong profile model.
        if self.is_teacher_create(self.kwargs['uuid'], self.kwargs['uuid_hash']):
            user_info_form = TeacherForm(request.POST)
        else:
            user_info_form = StudentForm(request.POST)
        if user_form.is_valid() and user_info_form.is_valid():
            user = user_form.save()
            user_info = user_info_form.save(commit = False)
            user_info.user = user
            user_info.organization_id = Organization.objects.get(id = self.kwargs['uuid'])
            user_info.save()
            messages.success(request, 'アカウントが作成されました。ようこそ、でじふぁーむ。へ')
            return redirect('index')
        data = {
            'user_form': user_form,
            'user_info_form': user_info_form,
        }
        messages.error(request, 'データに不備があります')
        return render(request, self.template_name, data)

    def is_teacher_create(self, uuid, uuid_hash):
        """Return True for a teacher invite hash, False for a student one.

        Raises:
            Http404: unknown organization, or the hash matches neither role.
        """
        # BUG FIX: Organization.objects.get raises DoesNotExist for unknown
        # ids instead of returning a falsy value, so the original truthiness
        # check could never trigger; translate it into the intended 404.
        try:
            if not Organization.objects.get(id = uuid):
                raise Http404
        except Organization.DoesNotExist:
            raise Http404
        # sha224はある程度重い処理なので、検査を分割することで少しばかりの処理速度軽減
        # (sha224 is moderately expensive, so test the teacher hash first and
        # only compute the student hash when needed.)
        teacher_hash = hashlib.sha224(str(uuid).encode('utf-8'))
        teacher_hash.update('teacher'.encode('utf-8'))
        if teacher_hash.hexdigest() == uuid_hash:
            return True
        student_hash = hashlib.sha224(str(uuid).encode('utf-8'))
        student_hash.update('student'.encode('utf-8'))
        if student_hash.hexdigest() == uuid_hash:
            return False
        raise Http404
| 30.387097
| 85
| 0.687367
|
868b0b0b2d63f8288774def2a9d841c4c167b4f8
| 2,838
|
py
|
Python
|
dags/data_ingest_gcs_fhv.py
|
olegaobini/airflow-demo-ny-taxi-dataset
|
1b7db71dcfd728686054e1342077b7fa56f2236a
|
[
"MIT"
] | null | null | null |
dags/data_ingest_gcs_fhv.py
|
olegaobini/airflow-demo-ny-taxi-dataset
|
1b7db71dcfd728686054e1342077b7fa56f2236a
|
[
"MIT"
] | null | null | null |
dags/data_ingest_gcs_fhv.py
|
olegaobini/airflow-demo-ny-taxi-dataset
|
1b7db71dcfd728686054e1342077b7fa56f2236a
|
[
"MIT"
] | null | null | null |
import os
from airflow import DAG
from airflow.utils.dates import days_ago
from airflow.operators.bash import BashOperator
from airflow.operators.python import PythonOperator
from airflow.providers.google.cloud.operators.bigquery import BigQueryCreateExternalTableOperator
from scripts.ingest_gcs_script import _format_to_parquet, upload_to_gcs
# Environment-driven configuration; AIRFLOW_HOME falls back to the default
# path used inside the official Airflow docker image.
AIRFLOW_HOME = os.environ.get("AIRFLOW_HOME", "/opt/airflow/")
PROJECT_ID = os.environ.get("GCP_PROJECT_ID")
BUCKET = os.environ.get("GCP_GCS_BUCKET")

# Jinja templates: Airflow substitutes execution_date when rendering each
# task, producing one file/table name per monthly run.
dataset_file = "fhv_tripdata_{{ execution_date.strftime(\'%Y-%m\') }}"
TABLE_NAME_TEMPLATE = 'fhv_{{ execution_date.strftime(\'%Y_%m\') }}'
URL_PREFIX = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
URL_TEMPLATE = f'{URL_PREFIX}/{dataset_file}.csv'
# Local CSV landing path, and the parquet file name derived from it.
OUTPUT_FILE_TEMPLATE = f'{AIRFLOW_HOME}/{dataset_file}.csv'
PARQUET_FILE = OUTPUT_FILE_TEMPLATE.replace('.csv', '.parquet')
BIGQUERY_DATASET = os.environ.get("BIGQUERY_DATASET", 'trips_data_all')

default_args = {
    "owner": "airflow",
    "start_date": '2019-01-01',
    "depends_on_past": False,
    "retries": 1,
}

# NOTE: DAG declaration - using a Context Manager (an implicit way)
with DAG(
    dag_id="data_ingest_gcs_fhv",
    schedule_interval="@monthly",
    default_args=default_args,
    catchup=True,  # backfill one run per month since start_date
    max_active_runs=4,
    tags=['for-hire vehicles'],
) as dag:

    # Download the month's CSV; curl -f fails on HTTP errors so the task is
    # marked failed instead of writing an error page to the output file.
    download_dataset = BashOperator(
        task_id="download_datasets",
        bash_command=f"curl -sSLf {URL_TEMPLATE} > {OUTPUT_FILE_TEMPLATE}"
    )

    # Convert the downloaded CSV to parquet (helper in ingest_gcs_script).
    format_to_parquet = PythonOperator(
        task_id="format_to_parquet",
        python_callable=_format_to_parquet,
        op_kwargs={
            "src_file": f"{OUTPUT_FILE_TEMPLATE}",
        },
    )

    # TODO: Homework - research and try XCOM to communicate output values between 2 tasks/operators
    local_to_gcs = PythonOperator(
        task_id="local_to_gcs",
        python_callable=upload_to_gcs,
        op_kwargs={
            "bucket": BUCKET,
            "object_name": f"raw/{dataset_file}.parquet",
            "local_file": f"{PARQUET_FILE}",
        },
    )

    # Remove both local copies (.csv and .parquet) once uploaded.
    remove_local_data = BashOperator(
        task_id="remove_local_data",
        bash_command=f"rm -rf {AIRFLOW_HOME}{dataset_file}.*"
    )

    # External BigQuery table reads the parquet directly from GCS, which is
    # why it can safely run after the local files have been removed.
    bigquery_external_table = BigQueryCreateExternalTableOperator(
        task_id="bigquery_external_table",
        table_resource={
            "tableReference": {
                "projectId": PROJECT_ID,
                "datasetId": BIGQUERY_DATASET,
                "tableId": "external_table",
            },
            "externalDataConfiguration": {
                "sourceFormat": "PARQUET",
                "sourceUris": [f"gs://{BUCKET}/raw/{dataset_file}.parquet"],
            },
        },
    )

    download_dataset >> format_to_parquet >> local_to_gcs >> remove_local_data >> bigquery_external_table
| 31.88764
| 105
| 0.672304
|
d52c6a7d2d4b371277a733aaf138d273f2d243a2
| 228
|
py
|
Python
|
tests/conftest.py
|
BrotherSymeon/yaabook
|
a7dfb9599b6cd3b9720aa87e42f1cf827a5c2dc4
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
BrotherSymeon/yaabook
|
a7dfb9599b6cd3b9720aa87e42f1cf827a5c2dc4
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
BrotherSymeon/yaabook
|
a7dfb9599b6cd3b9720aa87e42f1cf827a5c2dc4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Dummy conftest.py for yaabook.
If you don't know what this is for, just leave it empty.
Read more about conftest.py under:
https://docs.pytest.org/en/stable/how-to/writing_plugins.html
"""
# import pytest
| 20.727273
| 60
| 0.649123
|
0376455f5e3b8f475e7d1368908dca66d094221b
| 35,700
|
py
|
Python
|
src/transformers/trainer.py
|
theorist17/adapter-transformers
|
17a1e3f24aca59e3b131a47685dcefdfc69fa090
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/trainer.py
|
theorist17/adapter-transformers
|
17a1e3f24aca59e3b131a47685dcefdfc69fa090
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/trainer.py
|
theorist17/adapter-transformers
|
17a1e3f24aca59e3b131a47685dcefdfc69fa090
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import math
import os
import random
import re
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, Sampler, SequentialSampler
from tqdm.auto import tqdm, trange
from .adapter_bert import get_fusion_regularization_loss
from .data.data_collator import DataCollator, DefaultDataCollator
from .modeling_utils import PreTrainedModel
from .optimization import AdamW, get_linear_schedule_with_warmup
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, TrainOutput
from .training_args import TrainingArguments, is_tpu_available
# Optional NVIDIA apex support for mixed-precision (fp16) training.
try:
    from apex import amp  # noqa: F401

    _has_apex = True
except ImportError:
    _has_apex = False


def is_apex_available():
    """Return True when NVIDIA apex (and its ``amp`` module) is importable."""
    return _has_apex
# Import the torch_xla helpers only when running on TPU; ``xm``, ``met`` and
# ``pl`` are referenced elsewhere in this module under the same guard.
if is_tpu_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.parallel_loader as pl
# Prefer the native torch SummaryWriter, fall back to tensorboardX, and
# remember whether either implementation could be imported.
_has_tensorboard = True
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    try:
        from tensorboardX import SummaryWriter
    except ImportError:
        _has_tensorboard = False


def is_tensorboard_available():
    """Return True when some SummaryWriter implementation is importable."""
    return _has_tensorboard
# Weights & Biases logging is enabled only when the package is installed,
# an API key is configured, and WANDB_DISABLED is not set.
try:
    import wandb

    wandb.ensure_configured()
    if wandb.api.api_key is None:
        _has_wandb = False
        wandb.termwarn("W&B installed but not logged in. Run `wandb login` or set the WANDB_API_KEY env variable.")
    else:
        _has_wandb = not os.getenv("WANDB_DISABLED")
except ImportError:
    _has_wandb = False


def is_wandb_available():
    """Return True when wandb logging can be used."""
    return _has_wandb


logger = logging.getLogger(__name__)
def set_seed(seed: int):
    """Seed python, numpy and torch RNGs for reproducible runs."""
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all)
    for seeder in seeders:
        # torch.cuda.manual_seed_all is safe to call without a GPU (no-op).
        seeder(seed)
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Context manager for distributed training: non-master processes wait at a
    barrier so that the local master (rank 0) does the wrapped work first;
    the master then waits for everyone else at a second barrier.
    """
    is_local_master = local_rank in (-1, 0)
    if not is_local_master:
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        torch.distributed.barrier()
class SequentialDistributedSampler(Sampler):
    """
    Sampler that hands each replica a contiguous, sequential slice of the
    dataset, padding with the first indices so every replica receives the
    same number of samples. Intended for eval/predict, where sequential
    order makes gathering/reducing the per-replica results trivial.
    """

    def __init__(self, dataset, num_replicas=None, rank=None):
        if num_replicas is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = torch.distributed.get_world_size()
        if rank is None:
            if not torch.distributed.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = torch.distributed.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        # Per-replica count, rounded up so the padded total divides evenly.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        padded = list(range(len(self.dataset)))
        # Pad with the leading indices to reach an evenly divisible total.
        padded += padded[: self.total_size - len(padded)]
        assert len(padded) == self.total_size
        begin = self.rank * self.num_samples
        chunk = padded[begin : begin + self.num_samples]
        assert len(chunk) == self.num_samples
        return iter(chunk)

    def __len__(self):
        return self.num_samples
def get_tpu_sampler(dataset: Dataset):
    """Sampler for TPU training: distributed across XLA devices when there is
    more than one, a plain random sampler otherwise."""
    world_size = xm.xrt_world_size()
    if world_size <= 1:
        return RandomSampler(dataset)
    return DistributedSampler(dataset, num_replicas=world_size, rank=xm.get_ordinal())
class Trainer:
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for Transformers.
    """

    # Class-level annotations for the trainer's state; the instance values
    # are assigned in __init__.
    model: PreTrainedModel
    args: TrainingArguments
    data_collator: DataCollator
    train_dataset: Optional[Dataset]
    eval_dataset: Optional[Dataset]
    # Maps an EvalPrediction to a dict of metric name -> value.
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
    prediction_loss_only: bool
    tb_writer: Optional["SummaryWriter"] = None
    # (optimizer, lr scheduler) pair; when None, get_optimizers builds defaults.
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None
    global_step: Optional[int] = None
    epoch: Optional[float] = None
def __init__(
    self,
    model: PreTrainedModel,
    args: TrainingArguments,
    data_collator: Optional[DataCollator] = None,
    train_dataset: Optional[Dataset] = None,
    eval_dataset: Optional[Dataset] = None,
    compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
    prediction_loss_only=False,
    do_save_full_model: bool = True,
    do_save_adapters: bool = False,
    do_save_adapter_fusion: bool = False,
    adapter_names: Optional[List[List[str]]] = None,
    tb_writer: Optional["SummaryWriter"] = None,
    optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = None,
):
    """
    Trainer is a simple but feature-complete training and eval loop for PyTorch,
    optimized for Transformers.

    Args:
        model: model to train; moved onto ``args.device`` here.
        args: training hyper-parameters and environment settings.
        data_collator: batch collator; defaults to ``DefaultDataCollator()``.
        train_dataset: optional training dataset.
        eval_dataset: optional evaluation dataset.
        compute_metrics: maps an ``EvalPrediction`` to a metrics dict.
        prediction_loss_only:
            (Optional) in evaluation and prediction, only return the loss
        do_save_full_model / do_save_adapters / do_save_adapter_fusion:
            select which artifacts ``save_model`` writes.
        adapter_names: adapter names forwarded to the model on each step.
        optimizers: optional (optimizer, scheduler) pair that bypasses
            ``get_optimizers``.
    """
    self.model = model.to(args.device)
    self.args = args
    if data_collator is not None:
        self.data_collator = data_collator
    else:
        self.data_collator = DefaultDataCollator()
    self.train_dataset = train_dataset
    self.eval_dataset = eval_dataset
    self.compute_metrics = compute_metrics
    self.prediction_loss_only = prediction_loss_only
    self.optimizers = optimizers
    # NOTE(review): the tb_writer wiring below is commented out, so the
    # `tb_writer` argument is currently ignored and self.tb_writer keeps its
    # class-level default (None).
    # if tb_writer is not None:
    #     self.tb_writer = tb_writer
    # elif is_tensorboard_available() and self.is_world_master():
    #     self.tb_writer = SummaryWriter(log_dir=self.args.logging_dir)
    if not is_tensorboard_available():
        logger.warning(
            "You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it."
        )
    if is_wandb_available():
        self._setup_wandb()
    else:
        logger.info(
            "You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
            "run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
        )
    set_seed(self.args.seed)
    # Create output directory if needed
    if self.is_world_master():
        os.makedirs(self.args.output_dir, exist_ok=True)
    # adapters used
    self.do_save_full_model = do_save_full_model
    self.do_save_adapters = do_save_adapters
    self.do_save_adapter_fusion = do_save_adapter_fusion
    self.adapter_names = adapter_names
    if is_tpu_available():
        # Set an xla_device flag on the model's config.
        # We'll find a more elegant and not need to do this in the future.
        self.model.config.xla_device = True
def get_train_dataloader(self) -> DataLoader:
    """Build the training DataLoader with a sampler suited to the run mode
    (TPU, distributed, or single-process random sampling)."""
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")
    if is_tpu_available():
        sampler = get_tpu_sampler(self.train_dataset)
    elif self.args.local_rank == -1:
        sampler = RandomSampler(self.train_dataset)
    else:
        sampler = DistributedSampler(self.train_dataset)
    return DataLoader(
        self.train_dataset,
        batch_size=self.args.train_batch_size,
        sampler=sampler,
        collate_fn=self.data_collator.collate_batch,
    )
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
    """Build the evaluation DataLoader, falling back to self.eval_dataset
    when no dataset is passed explicitly."""
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")
    dataset = self.eval_dataset if eval_dataset is None else eval_dataset
    # Sequential sampling keeps results in dataset order for easy gathering.
    if is_tpu_available():
        sampler = SequentialDistributedSampler(
            dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
        )
    elif self.args.local_rank != -1:
        sampler = SequentialDistributedSampler(dataset)
    else:
        sampler = SequentialSampler(dataset)
    return DataLoader(
        dataset,
        sampler=sampler,
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator.collate_batch,
    )
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
    """Build the prediction DataLoader; reuses the eval batch size and the
    same sequential sampling strategy as evaluation."""
    if is_tpu_available():
        sampler = SequentialDistributedSampler(
            test_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
        )
    elif self.args.local_rank != -1:
        sampler = SequentialDistributedSampler(test_dataset)
    else:
        sampler = SequentialSampler(test_dataset)
    return DataLoader(
        test_dataset,
        sampler=sampler,
        batch_size=self.args.eval_batch_size,
        collate_fn=self.data_collator.collate_batch,
    )
def get_optimizers(
    self, num_training_steps: int
) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]:
    """
    Setup the optimizer and the learning rate scheduler.

    We provide a reasonable default that works well. To use something else,
    pass a tuple in the Trainer's init or override this method in a subclass.
    """
    if self.optimizers is not None:
        return self.optimizers
    # Weight decay is skipped for biases, LayerNorm weights and, when the
    # model defines them, adapter-fusion value matrices.
    no_decay = ["bias", "LayerNorm.weight"]
    if hasattr(self.model.config, "adapter_fusion_models"):
        no_decay.extend(
            f"adapter_fusion_layer.{n}.value" for n in self.model.config.adapter_fusion_models
        )

    def _wants_decay(param_name):
        # A parameter decays unless its name contains any no-decay marker.
        return not any(marker in param_name for marker in no_decay)

    decay_params, plain_params = [], []
    for name, param in self.model.named_parameters():
        (decay_params if _wants_decay(name) else plain_params).append(param)
    grouped_parameters = [
        {"params": decay_params, "weight_decay": self.args.weight_decay},
        {"params": plain_params, "weight_decay": 0.0},
    ]
    optimizer = AdamW(grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
    )
    return optimizer, scheduler
def _setup_wandb(self):
    """
    Setup the optional Weights & Biases (`wandb`) integration.

    One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface
    You can also override the following environment variables:

    Environment:
        WANDB_WATCH:
            (Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging
            or "all" to log gradients and parameters
        WANDB_PROJECT:
            (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
        WANDB_DISABLED:
            (Optional): boolean - defaults to false, set to "true" to disable wandb entirely
    """
    logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
    # All TrainingArguments fields are recorded as the wandb run config.
    wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=vars(self.args))
    # keep track of model topology and gradients
    if os.getenv("WANDB_WATCH") != "false":
        wandb.watch(
            self.model, log=os.getenv("WANDB_WATCH", "gradients"), log_freq=max(100, self.args.logging_steps)
        )
def num_examples(self, dataloader: DataLoader) -> int:
    """
    Helper to get num of examples from a DataLoader, by accessing its Dataset.
    """
    # Assumes the dataset implements __len__ (true for map-style datasets).
    return len(dataloader.dataset)
def train(self, model_path: Optional[str] = None):
    """
    Main training entry point.

    Args:
        model_path:
            (Optional) Local path to model if model to train has been instantiated from a local path
            If present, we will try reloading the optimizer/scheduler states from there.
    """
    train_dataloader = self.get_train_dataloader()
    # Derive total optimization steps and epoch count: either capped by
    # max_steps, or computed from the dataloader length and epoch count.
    if self.args.max_steps > 0:
        t_total = self.args.max_steps
        num_train_epochs = (
            self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1
        )
    else:
        t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)
        num_train_epochs = self.args.num_train_epochs
    optimizer, scheduler = self.get_optimizers(num_training_steps=t_total)
    # Check if saved optimizer or scheduler states exist
    if (
        model_path is not None
        and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
        and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(
            torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
        )
        scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
    model = self.model
    if self.args.fp16:
        if not is_apex_available():
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=self.args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if self.args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if self.args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[self.args.local_rank],
            output_device=self.args.local_rank,
            find_unused_parameters=True,
        )
    if self.tb_writer is not None:
        self.tb_writer.add_text("args", self.args.to_json_string())
        self.tb_writer.add_hparams(self.args.to_sanitized_dict(), metric_dict={})
    # Train!
    if is_tpu_available():
        total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
    else:
        total_train_batch_size = (
            self.args.train_batch_size
            * self.args.gradient_accumulation_steps
            * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
        )
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", self.num_examples(train_dataloader))
    logger.info(" Num Epochs = %d", num_train_epochs)
    logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
    logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    self.global_step = 0
    self.epoch = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if model_path is not None:
        # set global_step to global_step of last saved checkpoint from model path
        try:
            self.global_step = int(model_path.split("-")[-1].split("/")[0])
            epochs_trained = self.global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = self.global_step % (
                len(train_dataloader) // self.args.gradient_accumulation_steps
            )
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", self.global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            # model_path does not end in "-<step>": start fresh.
            self.global_step = 0
            logger.info(" Starting fine-tuning.")
    tr_loss = 0.0
    logging_loss = 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(num_train_epochs), desc="Epoch", disable=not self.is_local_master()
    )
    for epoch in train_iterator:
        if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
            # Reshuffle per epoch so each replica sees a new partition.
            train_dataloader.sampler.set_epoch(epoch)
        if is_tpu_available():
            parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
                self.args.device
            )
            epoch_iterator = tqdm(parallel_loader, desc="Iteration", disable=not self.is_local_master())
        else:
            epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=not self.is_local_master())
        for step, inputs in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            tr_loss += self._training_step(model, inputs, optimizer)
            # Take an optimizer step every gradient_accumulation_steps
            # batches, or at the end of a short epoch.
            if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                # last step in epoch but step is always smaller than gradient_accumulation_steps
                len(epoch_iterator) <= self.args.gradient_accumulation_steps
                and (step + 1) == len(epoch_iterator)
            ):
                # apply adapter fusion weight regularization on the value matrix
                if hasattr(self.model.config, "adapter_fusion") and self.model.config.adapter_fusion["regularization"]:
                    fusion_reg_loss = get_fusion_regularization_loss(self.model)
                    fusion_reg_loss.backward()
                if self.args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
                if is_tpu_available():
                    xm.optimizer_step(optimizer)
                else:
                    optimizer.step()
                scheduler.step()
                model.zero_grad()
                self.global_step += 1
                self.epoch = epoch + (step + 1) / len(epoch_iterator)
                if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
                    self.global_step == 1 and self.args.logging_first_step
                ):
                    logs: Dict[str, float] = {}
                    logs["loss"] = (tr_loss - logging_loss) / self.args.logging_steps
                    # backward compatibility for pytorch schedulers
                    logs["learning_rate"] = (
                        scheduler.get_last_lr()[0]
                        if version.parse(torch.__version__) >= version.parse("1.4")
                        else scheduler.get_lr()[0]
                    )
                    logging_loss = tr_loss
                    self._log(logs)
                    if self.args.evaluate_during_training:
                        self.evaluate()
                if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
                    # In all cases (even distributed/parallel), self.model is always a reference
                    # to the model we want to save.
                    if hasattr(model, "module"):
                        assert model.module is self.model
                    else:
                        assert model is self.model
                    # Save model checkpoint
                    output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.global_step}")
                    self.save_model(output_dir)
                    if self.is_world_master():
                        self._rotate_checkpoints()
                    if is_tpu_available():
                        xm.rendezvous("saving_optimizer_states")
                        xm.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        xm.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    elif self.is_world_master():
                        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
            if self.args.max_steps > 0 and self.global_step > self.args.max_steps:
                epoch_iterator.close()
                break
        if self.args.max_steps > 0 and self.global_step > self.args.max_steps:
            train_iterator.close()
            break
        if self.args.tpu_metrics_debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
    if self.tb_writer:
        self.tb_writer.close()
    if self.do_save_adapters:
        logger.info("\n\nTraining completed. Do not forget to share your adapters on https://adapterhub.ml =)\n\n")
    else:
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
    return TrainOutput(self.global_step, tr_loss / self.global_step)
def _log(self, logs: Dict[str, float], iterator: Optional[tqdm] = None) -> None:
    """Record metrics to tensorboard/wandb and echo them as one JSON line
    (written through *iterator* when given, else printed)."""
    if self.epoch is not None:
        logs["epoch"] = self.epoch
    if self.tb_writer:
        for key, value in logs.items():
            self.tb_writer.add_scalar(key, value, self.global_step)
    if is_wandb_available():
        wandb.log(logs, step=self.global_step)
    line = json.dumps({**logs, "step": self.global_step})
    if iterator is None:
        print(line)
    else:
        iterator.write(line)
def _training_step(
    self, model: nn.Module, inputs: Dict[str, torch.Tensor], optimizer: torch.optim.Optimizer
) -> float:
    """Run one forward/backward pass and return the (scaled) loss value."""
    model.train()
    # Move every input tensor to the training device (mutates `inputs`).
    for name, tensor in inputs.items():
        inputs[name] = tensor.to(self.args.device)
    if self.adapter_names:
        inputs["adapter_names"] = self.adapter_names
    outputs = model(**inputs)
    # model outputs are always tuple in transformers (see doc)
    loss = outputs[0]
    if self.args.n_gpu > 1:
        loss = loss.mean()  # mean() to average on multi-gpu parallel training
    if self.args.gradient_accumulation_steps > 1:
        loss = loss / self.args.gradient_accumulation_steps
    if self.args.fp16:
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()
    return loss.item()
def is_local_master(self) -> bool:
    """True for the master process on this machine (rank -1 or 0; on TPU,
    the local master ordinal)."""
    if is_tpu_available():
        return xm.is_master_ordinal(local=True)
    return self.args.local_rank in [-1, 0]
def is_world_master(self) -> bool:
    """
    True in exactly one process across all machines: the global TPU master
    ordinal, the distributed rank-0 process, or the sole process when not
    running distributed at all.
    """
    if is_tpu_available():
        return xm.is_master_ordinal(local=False)
    return self.args.local_rank == -1 or torch.distributed.get_rank() == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Saving best-practices: if you use default names for the model,
you can reload it using from_pretrained().
Will only save from the world_master process (unless in TPUs).
"""
if is_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_master():
self._save(output_dir)
    def _save_tpu(self, output_dir: Optional[str] = None):
        """Save model/adapters and training args on TPU.

        All ordinals enter this method; only the master ordinal creates the
        directory and saves the args, then every ordinal meets at the
        rendezvous before the (XLA-aware) model save calls.
        """
        output_dir = output_dir if output_dir is not None else self.args.output_dir
        logger.info("Saving model checkpoint to %s", output_dir)
        # Only the master process touches the filesystem for dir + args.
        if xm.is_master_ordinal():
            os.makedirs(output_dir, exist_ok=True)
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
        # Save a trained model and configuration using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        if not isinstance(self.model, PreTrainedModel):
            raise ValueError("Trainer.model appears to not be a PreTrainedModel")
        # Synchronize all TPU cores before writing the checkpoint.
        xm.rendezvous("saving_checkpoint")
        if self.do_save_adapters:
            self.model.save_all_adapters(output_dir)
        if self.do_save_adapter_fusion:
            self.model.save_all_adapter_fusions(output_dir)
        if self.do_save_full_model:
            self.model.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to %s", output_dir)
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
if self.do_save_adapters:
self.model.save_all_adapters(output_dir)
if self.do_save_adapter_fusion:
self.model.save_all_adapter_fusions(output_dir)
if self.do_save_full_model:
self.model.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(
self, eval_dataset: Optional[Dataset] = None, prediction_loss_only: Optional[bool] = None,
) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent.
Args:
eval_dataset: (Optional) Pass a dataset if you wish to override
the one on the instance.
Returns:
A dict containing:
- the eval loss
- the potential metrics computed from the predictions
"""
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self._prediction_loop(eval_dataloader, description="Evaluation")
self._log(output.metrics)
if self.args.tpu_metrics_debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
return output.metrics
def predict(self, test_dataset: Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
"""
test_dataloader = self.get_test_dataloader(test_dataset)
return self._prediction_loop(test_dataloader, description="Prediction")
    def _prediction_loop(
        self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
        Works both with or without labels.

        Accumulates logits/labels batch by batch on device, reduces them across
        workers (torch.distributed or TPU), then converts to numpy for metrics.
        """
        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
        model = self.model
        # multi-gpu eval
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        else:
            model = self.model
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        batch_size = dataloader.batch_size
        logger.info("***** Running %s *****", description)
        logger.info(" Num examples = %d", self.num_examples(dataloader))
        logger.info(" Batch size = %d", batch_size)
        eval_losses: List[float] = []
        preds: torch.Tensor = None
        label_ids: torch.Tensor = None
        model.eval()
        if is_tpu_available():
            # Shard the dataloader across TPU cores.
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        for inputs in tqdm(dataloader, desc=description):
            # A batch "has labels" if any of the known label keys is present.
            has_labels = any(inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
            for k, v in inputs.items():
                inputs[k] = v.to(self.args.device)
            if self.adapter_names:
                inputs["adapter_names"] = self.adapter_names
            with torch.no_grad():
                outputs = model(**inputs)
                # With labels the model returns (loss, logits, ...); otherwise logits first.
                if has_labels:
                    step_eval_loss, logits = outputs[:2]
                    eval_losses += [step_eval_loss.mean().item()]
                else:
                    logits = outputs[0]
            if not prediction_loss_only:
                # Concatenate detached logits (and labels) across batches.
                if preds is None:
                    preds = logits.detach()
                else:
                    preds = torch.cat((preds, logits.detach()), dim=0)
                if inputs.get("labels") is not None:
                    if label_ids is None:
                        label_ids = inputs["labels"].detach()
                    else:
                        label_ids = torch.cat((label_ids, inputs["labels"].detach()), dim=0)
        if self.args.local_rank != -1:
            # In distributed mode, concatenate all results from all nodes:
            if preds is not None:
                preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
            if label_ids is not None:
                label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
        elif is_tpu_available():
            # tpu-comment: Get all predictions and labels from all worker shards of eval dataset
            if preds is not None:
                preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
            if label_ids is not None:
                label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
        # Finally, turn the aggregated tensors into numpy arrays.
        if preds is not None:
            preds = preds.cpu().numpy()
        if label_ids is not None:
            label_ids = label_ids.cpu().numpy()
        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
        if len(eval_losses) > 0:
            metrics["eval_loss"] = np.mean(eval_losses)
        # Prefix all keys with eval_
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)
        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:
assert self.args.local_rank != -1
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
# truncate the dummy elements added by SequentialDistributedSampler
output = concat[:num_total_examples]
return output
| 41.901408
| 131
| 0.626807
|
f2f6b2a5b81fd8cc7f7ae8ca7b093f426a2cbfc2
| 3,672
|
py
|
Python
|
huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/list_event_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/list_event_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-waf/huaweicloudsdkwaf/v1/model/list_event_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListEventResponse(SdkResponse):
    """Response model for the WAF ListEvent API (auto-generated SDK code).

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked in to_dict() output.
    sensitive_list = []
    openapi_types = {
        'total': 'int',
        'items': 'list[ListEventItems]'
    }
    attribute_map = {
        'total': 'total',
        'items': 'items'
    }
    def __init__(self, total=None, items=None):
        """ListEventResponse - a model defined in huaweicloud sdk"""
        super(ListEventResponse, self).__init__()
        self._total = None
        self._items = None
        self.discriminator = None
        if total is not None:
            self.total = total
        if items is not None:
            self.items = items
    @property
    def total(self):
        """Gets the total of this ListEventResponse.
        Number of attack events.
        :return: The total of this ListEventResponse.
        :rtype: int
        """
        return self._total
    @total.setter
    def total(self, total):
        """Sets the total of this ListEventResponse.
        Number of attack events.
        :param total: The total of this ListEventResponse.
        :type: int
        """
        self._total = total
    @property
    def items(self):
        """Gets the items of this ListEventResponse.
        Attack event details.
        :return: The items of this ListEventResponse.
        :rtype: list[ListEventItems]
        """
        return self._items
    @items.setter
    def items(self, items):
        """Sets the items of this ListEventResponse.
        Attack event details.
        :param items: The items of this ListEventResponse.
        :type: list[ListEventItems]
        """
        self._items = items
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Walk the declared attributes and recursively serialize nested models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListEventResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.859155
| 79
| 0.548747
|
2f57c4c2b916da43e0bcf3cea00422c67529b9e2
| 2,929
|
py
|
Python
|
lib/smoothstreams/windowutils.py
|
slackr/kodi-smoothstreams-addon
|
c41fc02c8fe27e7123057298c1f3223d4d9ae261
|
[
"BSD-3-Clause"
] | null | null | null |
lib/smoothstreams/windowutils.py
|
slackr/kodi-smoothstreams-addon
|
c41fc02c8fe27e7123057298c1f3223d4d9ae261
|
[
"BSD-3-Clause"
] | null | null | null |
lib/smoothstreams/windowutils.py
|
slackr/kodi-smoothstreams-addon
|
c41fc02c8fe27e7123057298c1f3223d4d9ae261
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, unicode_literals
import threading
import xbmcgui, xbmc
from . import player
class ActionHandler(object):
    """Debounces Kodi action events.

    Rapid repeats cancel the pending timer so only the final action (after
    `delay` seconds of quiet) reaches `callback`; new actions arriving while
    the callback runs are ignored.
    """

    def __init__(self, callback):
        self.callback = callback
        self.event = threading.Event()
        self.event.clear()
        self.timer = None
        self.delay = 0.001

    def onAction(self, action):
        # Cancel any queued action, then drop this one if the callback is busy.
        if self.timer:
            self.timer.cancel()
        if self.event.isSet():
            return
        self.timer = threading.Timer(self.delay, self.doAction, args=[action])
        self.timer.start()

    def doAction(self, action):
        # Mark busy while the callback runs; always clear afterwards.
        self.event.set()
        try:
            self.callback(action)
        finally:
            self.event.clear()

    def clear(self):
        if self.timer:
            self.timer.cancel()
        return self.event.isSet()
class FakeActionHandler(object):
    """Pass-through stand-in for ActionHandler: no debouncing, no timers."""

    def __init__(self, callback):
        self.callback = callback

    def onAction(self, action):
        # Deliver immediately; there is no pending state to manage.
        self.callback(action)

    def clear(self):
        return False
class BaseWindow(xbmcgui.WindowXML):
    """Window base class that caches its window id and suppresses setProperty
    once the window has started closing."""

    def __init__(self, *args, **kwargs):
        self._closing = False
        self._winID = ''

    def onInit(self):
        self._winID = xbmcgui.getCurrentWindowId()

    def setProperty(self, key, value):
        if self._closing:
            return
        xbmcgui.Window(self._winID).setProperty(key, value)
        # Bug fix: this class derives from WindowXML, not WindowXMLDialog
        # (that was a copy-paste from BaseDialog) — delegate to the right base.
        xbmcgui.WindowXML.setProperty(self, key, value)

    def doClose(self):
        self._closing = True
        self.close()

    def onClosed(self):
        pass
class BaseDialog(xbmcgui.WindowXMLDialog):
    """Dialog base class that caches its dialog id and suppresses setProperty
    once the dialog has started closing."""

    def __init__(self, *args, **kwargs):
        self._closing = False
        self._winID = ''

    def onInit(self):
        self._winID = xbmcgui.getCurrentWindowDialogId()

    def setProperty(self, key, value):
        if self._closing:
            return
        # Mirror the property on the concrete window object as well.
        xbmcgui.Window(self._winID).setProperty(key, value)
        xbmcgui.WindowXMLDialog.setProperty(self, key, value)

    def doClose(self):
        self._closing = True
        self.close()

    def onClosed(self):
        pass
class KodiChannelEntry(BaseDialog):
    """Numeric channel-entry overlay: collects up to three remote-control
    digits, displays them as images, and starts playback on select."""
    def __init__(self,*args,**kwargs):
        self.viewManager = kwargs['viewManager']
        # First digit arrives as a string via kwargs; more are appended later.
        self.digits = kwargs['digit']
        self.digit2 = None
        self.set = False
        # Skin image path template for digit glyphs.
        self.digitFileBase = 'numbers/{0}.png'
        BaseDialog.__init__(self,*args,**kwargs)
    def onInit(self):
        BaseDialog.onInit(self)
        self.setProperty('digit1',self.digitFileBase.format(self.digits))
    def onAction(self, action):
        try:
            if action == xbmcgui.ACTION_SELECT_ITEM:
                self.finish()
            else:
                self.handleDigit(action)
        finally:
            BaseDialog.onAction(self,action)
    def handleDigit(self, action):
        # Only react to the numeric remote keys REMOTE_0..REMOTE_9.
        if action.getId() >= xbmcgui.REMOTE_0 and action.getId() <= xbmcgui.REMOTE_9:
            # NOTE(review): subtracting 58 maps the action id to its digit,
            # which assumes xbmcgui.REMOTE_0 == 58 — TODO confirm against Kodi API.
            if self.digit2:
                # Third digit: channel number is complete enough to display.
                digit3 = str(action.getId() - 58)
                self.digits += digit3
                self.setProperty('digit3',self.digitFileBase.format(digit3))
                self.setProperty('number',self.digits)
                xbmc.sleep(100)
            else:
                # Second digit.
                self.digit2 = str(action.getId() - 58)
                self.digits += self.digit2
                self.setProperty('digit2',self.digitFileBase.format(self.digit2))
    def finish(self):
        # int() strips any leading zeros before playback starts.
        self.digits = int(self.digits)
        self.set = True
        player.initPlay(str(self.digits))
        self.close()
| 24.613445
| 80
| 0.722431
|
6f077ef28914fc78b92263aeab218c6cef6e6887
| 233
|
py
|
Python
|
packages/algo-py/solutions/algo_03_test.py
|
protiumx/algo
|
0c4d8dc9ca8f8ec30793f73bef9718fee2300297
|
[
"MIT"
] | null | null | null |
packages/algo-py/solutions/algo_03_test.py
|
protiumx/algo
|
0c4d8dc9ca8f8ec30793f73bef9718fee2300297
|
[
"MIT"
] | null | null | null |
packages/algo-py/solutions/algo_03_test.py
|
protiumx/algo
|
0c4d8dc9ca8f8ec30793f73bef9718fee2300297
|
[
"MIT"
] | null | null | null |
import unittest
from solutions import max_substr
class TestAlgo_03(unittest.TestCase):
    """Checks max_substr on a sample whose longest qualifying substring is 6."""

    def test(self):
        sample = "abcbghjkb"
        result = max_substr(sample)
        self.assertEqual(result, 6)


if __name__ == '__main__':
    unittest.main()
| 17.923077
| 46
| 0.690987
|
4175c5749272175f21659d8893d818d1df2cea4e
| 299
|
py
|
Python
|
scripts/smooth_image.py
|
acse-yz11721/environments_mpm
|
84367a0eb41fd06883b44d2c2dfe2f4f47efd465
|
[
"MIT"
] | null | null | null |
scripts/smooth_image.py
|
acse-yz11721/environments_mpm
|
84367a0eb41fd06883b44d2c2dfe2f4f47efd465
|
[
"MIT"
] | null | null | null |
scripts/smooth_image.py
|
acse-yz11721/environments_mpm
|
84367a0eb41fd06883b44d2c2dfe2f4f47efd465
|
[
"MIT"
] | null | null | null |
from envtest import smooth_image
import matplotlib.pyplot as plt

# `scipy.misc.ascent` was deprecated in SciPy 1.10 and removed in 1.12;
# prefer the replacement in `scipy.datasets`, falling back for older SciPy.
try:
    from scipy.datasets import ascent
except ImportError:  # SciPy < 1.10
    from scipy.misc import ascent

# Demo: smooth the standard "ascent" test image with a Gaussian of width sigma.
image = ascent()
sigma = 5
smoothed_image = smooth_image(image, sigma)

# Show the original and smoothed images side by side.
f = plt.figure()
f.add_subplot(1, 2, 1)
plt.imshow(image)
f.add_subplot(1, 2, 2)
plt.imshow(smoothed_image)
plt.show(block=True)
| 15.736842
| 43
| 0.752508
|
678bc438e2f831439965dcada8eda3b0e3f06bec
| 751
|
py
|
Python
|
problems/tree/Solution1315.py
|
akalu/cs-problems-python
|
9b1bd8e3932be62135a38a77f955ded9a766b654
|
[
"MIT"
] | null | null | null |
problems/tree/Solution1315.py
|
akalu/cs-problems-python
|
9b1bd8e3932be62135a38a77f955ded9a766b654
|
[
"MIT"
] | null | null | null |
problems/tree/Solution1315.py
|
akalu/cs-problems-python
|
9b1bd8e3932be62135a38a77f955ded9a766b654
|
[
"MIT"
] | null | null | null |
""" Given a binary tree, return the sum of values of nodes with even-valued
grandparent. (A grandparent of a node is the parent of its parent, if it
exists.)
If there are no nodes with an even-valued grandparent, return 0
IDEA:
traverse the tree using a simulation:
wrap each "traversed" node into Pair and preserve even-odd information within it
0 4 <-- has no parent so even=false
/ \
1 3 8 <-- has an even parent "4"
/ \
2 1 6 <-- has an odd parent "3"
\
3 2 <-- has an even parent "6"
4
"""
class Solution1315:
    """Solution for LeetCode 1315: sum of nodes with an even-valued grandparent.

    The original file described the algorithm in its module docstring but left
    this class empty; the implementation below follows that description.
    """

    def sumEvenGrandparent(self, root):
        """Return the sum of values of all nodes whose grandparent is even.

        `root` may be None (empty tree) or any binary-tree node exposing
        `val`, `left` and `right` attributes.
        """
        def walk(node, parent_is_even, grandparent_is_even):
            # Depth-first traversal carrying the parity of the two ancestors,
            # exactly the wrap-parent-parity idea sketched in the module docstring.
            if node is None:
                return 0
            total = node.val if grandparent_is_even else 0
            here_is_even = node.val % 2 == 0
            total += walk(node.left, here_is_even, parent_is_even)
            total += walk(node.right, here_is_even, parent_is_even)
            return total

        # The (absent) parent and grandparent of the root count as "not even".
        return walk(root, False, False)
| 31.291667
| 83
| 0.49534
|
b2c681b33c10ba1b195ff5ca926254e361a70415
| 5,198
|
py
|
Python
|
django/utils/deprecation.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 7
|
2021-07-10T14:32:29.000Z
|
2021-07-10T16:14:09.000Z
|
django/utils/deprecation.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 21
|
2021-02-04T01:37:44.000Z
|
2022-03-12T01:00:55.000Z
|
django/utils/deprecation.py
|
ni-ning/django
|
2e7ba6057cfc82a15a22b6021cd60cf307152e2d
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | 27
|
2021-11-10T08:44:10.000Z
|
2022-03-30T08:19:46.000Z
|
import asyncio
import inspect
import warnings
from asgiref.sync import sync_to_async
class RemovedInDjango40Warning(DeprecationWarning):
    """Category for features scheduled for removal in Django 4.0 (hard deprecation)."""
    pass
class RemovedInDjango41Warning(PendingDeprecationWarning):
    """Category for features scheduled for removal in Django 4.1 (pending deprecation)."""
    pass
# The "next version" alias always points at the nearest hard deprecation.
RemovedInNextVersionWarning = RemovedInDjango40Warning
class warn_about_renamed_method:
    """Decorator that emits a deprecation warning each time a method is
    invoked under its old (renamed) name, then delegates to the wrapped
    function unchanged."""
    def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
        self.class_name = class_name
        self.old_method_name = old_method_name
        self.new_method_name = new_method_name
        self.deprecation_warning = deprecation_warning
    def __call__(self, f):
        def wrapped(*args, **kwargs):
            message = "`%s.%s` is deprecated, use `%s` instead." % (
                self.class_name,
                self.old_method_name,
                self.new_method_name,
            )
            # stacklevel 2 points the warning at the caller of the old method.
            warnings.warn(message, self.deprecation_warning, 2)
            return f(*args, **kwargs)
        return wrapped
class RenameMethodsBase(type):
    """
    Handles the deprecation paths when renaming a method.
    It does the following:
    1) Define the new method if missing and complain about it.
    2) Define the old method if missing.
    3) Complain whenever an old method is called.
    See #15363 for more details.
    """
    # Sequence of (old_name, new_name, deprecation_warning) triples,
    # set by subclasses of this metaclass.
    renamed_methods = ()
    def __new__(cls, name, bases, attrs):
        new_class = super().__new__(cls, name, bases, attrs)
        # Walk the full MRO so renames on ancestors are patched too.
        for base in inspect.getmro(new_class):
            class_name = base.__name__
            for renamed_method in cls.renamed_methods:
                old_method_name = renamed_method[0]
                old_method = base.__dict__.get(old_method_name)
                new_method_name = renamed_method[1]
                new_method = base.__dict__.get(new_method_name)
                deprecation_warning = renamed_method[2]
                wrapper = warn_about_renamed_method(class_name, *renamed_method)
                # Define the new method if missing and complain about it
                if not new_method and old_method:
                    warnings.warn(
                        "`%s.%s` method should be renamed `%s`." %
                        (class_name, old_method_name, new_method_name),
                        deprecation_warning, 2)
                    setattr(base, new_method_name, old_method)
                    setattr(base, old_method_name, wrapper(old_method))
                # Define the old method as a wrapped call to the new method.
                if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))
        return new_class
class DeprecationInstanceCheck(type):
    """Metaclass that warns whenever isinstance() is checked against a class
    kept only as a deprecated alias. Expects the class to define
    `alternative` (name of the replacement) and `deprecation_warning`."""
    def __instancecheck__(self, instance):
        message = "`%s` is deprecated, use `%s` instead." % (
            self.__name__,
            self.alternative,
        )
        warnings.warn(message, self.deprecation_warning, 2)
        return super().__instancecheck__(instance)
class MiddlewareMixin:
    """Adapter base for old-style middleware: wires process_request /
    process_response hooks into Django's get_response chain, in both
    sync and async mode."""
    sync_capable = True
    async_capable = True
    # RemovedInDjango40Warning: when the deprecation ends, replace with:
    # def __init__(self, get_response):
    def __init__(self, get_response=None):
        self._get_response_none_deprecation(get_response)
        self.get_response = get_response
        self._async_check()
        super().__init__()
    def _async_check(self):
        """
        If get_response is a coroutine function, turns us into async mode so
        a thread is not consumed during a whole request.
        """
        if asyncio.iscoroutinefunction(self.get_response):
            # Mark the class as async-capable, but do the actual switch
            # inside __call__ to avoid swapping out dunder methods
            self._is_coroutine = asyncio.coroutines._is_coroutine
    def __call__(self, request):
        # Exit out to async mode, if needed
        if asyncio.iscoroutinefunction(self.get_response):
            return self.__acall__(request)
        # Sync path: process_request may short-circuit by returning a response.
        response = None
        if hasattr(self, 'process_request'):
            response = self.process_request(request)
        response = response or self.get_response(request)
        if hasattr(self, 'process_response'):
            response = self.process_response(request, response)
        return response
    async def __acall__(self, request):
        """
        Async version of __call__ that is swapped in when an async request
        is running.

        Sync hooks are run in a thread via sync_to_async (thread_sensitive
        so ORM access stays on one thread).
        """
        response = None
        if hasattr(self, 'process_request'):
            response = await sync_to_async(
                self.process_request,
                thread_sensitive=True,
            )(request)
        response = response or await self.get_response(request)
        if hasattr(self, 'process_response'):
            response = await sync_to_async(
                self.process_response,
                thread_sensitive=True,
            )(request, response)
        return response
    def _get_response_none_deprecation(self, get_response):
        # Transitional check for the old no-argument construction style.
        if get_response is None:
            warnings.warn(
                'Passing None for the middleware get_response argument is '
                'deprecated.',
                RemovedInDjango40Warning, stacklevel=3,
            )
| 35.121622
| 90
| 0.63005
|
ec7822f7846571e59c54fac56e729766062f7f8b
| 1,729
|
py
|
Python
|
solutions/day11/test_lib.py
|
benjaminarjun/AdventOfCode2020
|
b9ca2f5c6121c401eb79911dbbbd0d3188f38034
|
[
"MIT"
] | 1
|
2020-12-04T17:57:24.000Z
|
2020-12-04T17:57:24.000Z
|
solutions/day11/test_lib.py
|
benjaminarjun/AdventOfCode2020
|
b9ca2f5c6121c401eb79911dbbbd0d3188f38034
|
[
"MIT"
] | null | null | null |
solutions/day11/test_lib.py
|
benjaminarjun/AdventOfCode2020
|
b9ca2f5c6121c401eb79911dbbbd0d3188f38034
|
[
"MIT"
] | null | null | null |
import unittest
import pathlib
from parameterized import parameterized
from .results import Grid, TerminalSeating
def _load_data(file_name):
    """Read a fixture file from `test_vals/` and return its stripped lines."""
    fixture_path = pathlib.Path(__file__).parent.joinpath('test_vals', file_name)
    with open(fixture_path, 'r') as handle:
        return [entry.strip() for entry in handle.readlines()]
class TestGrid(unittest.TestCase):
    """Exercises coordinate-based get/set on the Grid helper."""

    def test_get(self):
        grid = Grid(['as', 'df'])
        self.assertEqual('d', grid.get_val((1, 0)))

    def test_set(self):
        grid = Grid(['as', 'df'])
        grid.set_val((1, 0), 'g')
        self.assertEqual('g', grid.get_val((1, 0)))
class TestTerminalSeating(unittest.TestCase):
    """Validates TerminalSeating state after N epochs against recorded fixtures."""

    def test_ex_1_setup(self):
        layout = _load_data('initial.txt')
        seating = TerminalSeating(layout)
        self.assertEqual(Grid(layout), seating.initial)
        self.assertEqual(Grid(layout), seating.current)
        self.assertEqual(0, seating.epoch)

    @parameterized.expand([
        [_load_data('expected_epoch_1.txt'), 1],
        [_load_data('expected_epoch_2.txt'), 2],
        [_load_data('expected_epoch_3.txt'), 3],
        [_load_data('expected_epoch_4.txt'), 4],
        [_load_data('expected_epoch_5.txt'), 5],
    ])
    def test_ex_1_state_after_num_epochs(self, expected, num_epochs):
        seating = TerminalSeating(_load_data('initial.txt'))
        seating.run_epochs(num_epochs)
        self.assertEqual(expected, seating.current)

    def test_equilibrium_finder(self):
        seating = TerminalSeating(_load_data('initial.txt'))
        seating.run_until_equilibrium()
        self.assertEqual(6, seating.equilibrium_epoch)
| 29.810345
| 78
| 0.659341
|
4fa933537305faf5e3b123afc79671c365a0a0c4
| 1,443
|
py
|
Python
|
exercises/nth-prime/nth_prime_test.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 2
|
2019-10-02T07:18:44.000Z
|
2019-10-07T11:11:39.000Z
|
exercises/nth-prime/nth_prime_test.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 7
|
2021-03-31T18:51:01.000Z
|
2022-02-03T16:40:58.000Z
|
exercises/nth-prime/nth_prime_test.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 2
|
2020-05-08T19:00:28.000Z
|
2020-06-05T02:04:12.000Z
|
import unittest
from nth_prime import prime
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.1.0
def prime_range(n):
    """Returns a list of the first n primes"""
    return list(map(prime, range(1, n + 1)))
class NthPrimeTest(unittest.TestCase):
    """Exercises prime() against canonical nth-prime test data."""

    def test_first_prime(self):
        self.assertEqual(prime(1), 2)

    def test_second_prime(self):
        self.assertEqual(prime(2), 3)

    def test_sixth_prime(self):
        self.assertEqual(prime(6), 13)

    def test_big_prime(self):
        self.assertEqual(prime(10001), 104743)

    def test_there_is_no_zeroth_prime(self):
        with self.assertRaisesWithMessage(ValueError):
            prime(0)

    # Additional tests for this track
    def test_first_twenty_primes(self):
        expected = [
            2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
            31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        ]
        self.assertEqual(prime_range(20), expected)

    # Utility functions
    def assertRaisesWithMessage(self, exception):
        return self.assertRaisesRegex(exception, r".+")


if __name__ == "__main__":
    unittest.main()
| 21.863636
| 75
| 0.495495
|
709ed2305023db10cc041e135de5431a13a6c366
| 2,072
|
py
|
Python
|
jaconv/__init__.py
|
ikegami-yukino/jaconv
|
a12ffd3814e530d22af0faf7b50f06bc92b44f11
|
[
"MIT"
] | 215
|
2016-04-02T10:38:10.000Z
|
2022-03-29T07:56:23.000Z
|
jaconv/__init__.py
|
ikegami-yukino/jaconv
|
a12ffd3814e530d22af0faf7b50f06bc92b44f11
|
[
"MIT"
] | 18
|
2017-02-20T05:05:08.000Z
|
2021-10-11T13:55:07.000Z
|
jaconv/__init__.py
|
ikegami-yukino/jaconv
|
a12ffd3814e530d22af0faf7b50f06bc92b44f11
|
[
"MIT"
] | 25
|
2016-10-03T21:37:13.000Z
|
2022-02-01T03:44:34.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import jaconv
"""jaconv
This module provides Japanese and ASCII character interconverting between
Hiragana and full-/half-width Katakana/ASCII characters.
Author:
Yukino Ikegami
Lisence:
MIT License
Usage:
import jaconv
jaconv.hira2kata(text, [ignore]) # ひらがなを全角カタカナに変換
jaconv.hira2hkata(text, [ignore]) # ひらがなを半角カタカナに変換
jaconv.kata2hira(text, [ignore]) # 全角カタカナをひらがなに変換
jaconv.enlargesmallkana(text, [ignore]) # 小文字かなを大文字かなに変換
jaconv.h2z(text, [ignore, kana, ascii, digit]) # 半角文字を全角文字に変換
jaconv.z2h(text, [ignore, kana, ascii, digit]) # 全角文字を半角文字に変換
jaconv.han2zen(text, [ignore, kana, ascii, digit]) # 半角文字を全角文字に変換
jaconv.zen2han(text, [ignore, kana, ascii, digit]) # 全角文字を半角文字に変換
jaconv.normalize(text, [nomalizemode]) # 半角カナを全角カナへ、全角英数字を半角英数字に変換
jaconv.kana2alphabet(text) # かなをヘボン式アルファベットに変換
jaconv.alphabet2kana(text) # アルファベットをかなに変換
jaconv.kata2alphabet(text) # カタカナをアルファベットに変換
jaconv.alphabet2kata(text) # アルファベットをカタカナに変換
jaconv.hiragana2julius(text) # ひらがなをJuliusの音素表現に変換
"""
VERSION = (0, 3)
__version__ = '0.3'
__all__ = ['hira2kata', 'hira2hkata', 'kata2hira', 'h2z', 'z2h',
'hankaku2zenkaku', 'zenkaku2hankaku', 'normalize',
'kana2alphabet', 'alphabet2kana', 'kata2alphabet', 'alphabet2kata',
'hiragana2julius', 'enlargesmallkana']
hira2kata = jaconv.hira2kata
hira2hkata = jaconv.hira2hkata
kata2hira = jaconv.kata2hira
h2z = jaconv.h2z
z2h = jaconv.z2h
han2zen = jaconv.h2z # an alias of h2z
zen2han = jaconv.z2h # an alias of z2h
hankaku2zenkaku = jaconv.h2z # an alias of h2z
zenkaku2hankaku = jaconv.z2h # an alias of z2h
normalize = jaconv.normalize
kana2alphabet = jaconv.kana2alphabet
alphabet2kana = jaconv.alphabet2kana
kata2alphabet = lambda text: jaconv.kana2alphabet(jaconv.kata2hira(text))
alphabet2kata = lambda text: jaconv.hira2kata(jaconv.alphabet2kana(text))
hiragana2julius = jaconv.hiragana2julius
enlargesmallkana = jaconv.enlargesmallkana
| 37
| 78
| 0.7389
|
3b644d67578b8aea3f784d3eda5720706df065a4
| 2,710
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/azure_firewall_network_rule_collection_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/azure_firewall_network_rule_collection_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/azure_firewall_network_rule_collection_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class AzureFirewallNetworkRuleCollection(SubResource):
    """Network rule collection resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param id: Resource ID.
    :type id: str
    :param priority: Priority of the network rule collection resource.
    :type priority: int
    :param action: The action type of a rule collection
    :type action: ~azure.mgmt.network.v2018_07_01.models.AzureFirewallRCAction
    :param rules: Collection of rules used by a network rule collection.
    :type rules:
     list[~azure.mgmt.network.v2018_07_01.models.AzureFirewallNetworkRule]
    :param provisioning_state: The provisioning state of the resource.
     Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
    :type provisioning_state: str or
     ~azure.mgmt.network.v2018_07_01.models.ProvisioningState
    :param name: Gets name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :ivar etag: Gets a unique read-only string that changes whenever the
     resource is updated.
    :vartype etag: str
    """
    # Serializer constraints: priority must be 100-65000; etag is server-set.
    _validation = {
        'priority': {'maximum': 65000, 'minimum': 100},
        'etag': {'readonly': True},
    }
    # Maps Python attribute names to their JSON paths in the wire format.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'action': {'key': 'properties.action', 'type': 'AzureFirewallRCAction'},
        'rules': {'key': 'properties.rules', 'type': '[AzureFirewallNetworkRule]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, *, id: str=None, priority: int=None, action=None, rules=None, provisioning_state=None, name: str=None, **kwargs) -> None:
        """Initialize the collection; `etag` stays None (read-only, server-populated)."""
        super(AzureFirewallNetworkRuleCollection, self).__init__(id=id, **kwargs)
        self.priority = priority
        self.action = action
        self.rules = rules
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = None
| 41.692308
| 144
| 0.637638
|
2789646d91a850470c0e8b0dbb5af2f2df9cb620
| 7,180
|
py
|
Python
|
sdk/python/pulumi_aws_native/cloudfront/streaming_distribution.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/cloudfront/streaming_distribution.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/cloudfront/streaming_distribution.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StreamingDistributionArgs', 'StreamingDistribution']
# NOTE(review): generated Pulumi code — the @pulumi.input_type decorator and
# @pulumi.getter names are introspected by the Pulumi runtime; do not rename.
@pulumi.input_type
class StreamingDistributionArgs:
    def __init__(__self__, *,
                 streaming_distribution_config: pulumi.Input['StreamingDistributionConfigArgs'],
                 tags: pulumi.Input[Sequence[pulumi.Input['StreamingDistributionTagArgs']]]):
        """
        The set of arguments for constructing a StreamingDistribution resource.
        Both arguments are required.
        """
        pulumi.set(__self__, "streaming_distribution_config", streaming_distribution_config)
        pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="streamingDistributionConfig")
    def streaming_distribution_config(self) -> pulumi.Input['StreamingDistributionConfigArgs']:
        """The distribution's configuration input."""
        return pulumi.get(self, "streaming_distribution_config")

    @streaming_distribution_config.setter
    def streaming_distribution_config(self, value: pulumi.Input['StreamingDistributionConfigArgs']):
        pulumi.set(self, "streaming_distribution_config", value)

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Input[Sequence[pulumi.Input['StreamingDistributionTagArgs']]]:
        """Tags to apply to the distribution."""
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: pulumi.Input[Sequence[pulumi.Input['StreamingDistributionTagArgs']]]):
        pulumi.set(self, "tags", value)


# Emitted once at import time to warn that this resource type is unsupported.
warnings.warn("""StreamingDistribution is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)
class StreamingDistribution(pulumi.CustomResource):
    # Class-body warn: fires when the class statement is executed (module import).
    warnings.warn("""StreamingDistribution is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""", DeprecationWarning)

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 streaming_distribution_config: Optional[pulumi.Input[pulumi.InputType['StreamingDistributionConfigArgs']]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StreamingDistributionTagArgs']]]]] = None,
                 __props__=None):
        """
        Resource Type definition for AWS::CloudFront::StreamingDistribution

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: StreamingDistributionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Resource Type definition for AWS::CloudFront::StreamingDistribution

        :param str resource_name: The name of the resource.
        :param StreamingDistributionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads: an Args object vs. loose kwargs.
        resource_args, opts = _utilities.get_resource_args_opts(StreamingDistributionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 streaming_distribution_config: Optional[pulumi.Input[pulumi.InputType['StreamingDistributionConfigArgs']]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StreamingDistributionTagArgs']]]]] = None,
                 __props__=None):
        # Runtime warning mirrors the import-time DeprecationWarning above.
        pulumi.log.warn("""StreamingDistribution is deprecated: StreamingDistribution is not yet supported by AWS Native, so its creation will currently fail. Please use the classic AWS provider, if possible.""")
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means "adopt an existing resource": props must not be given.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = StreamingDistributionArgs.__new__(StreamingDistributionArgs)

            # Required unless resolving by URN.
            if streaming_distribution_config is None and not opts.urn:
                raise TypeError("Missing required property 'streaming_distribution_config'")
            __props__.__dict__["streaming_distribution_config"] = streaming_distribution_config
            if tags is None and not opts.urn:
                raise TypeError("Missing required property 'tags'")
            __props__.__dict__["tags"] = tags
            # Output-only property: populated by the provider after creation.
            __props__.__dict__["domain_name"] = None
        super(StreamingDistribution, __self__).__init__(
            'aws-native:cloudfront:StreamingDistribution',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'StreamingDistribution':
        """
        Get an existing StreamingDistribution resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; the engine fills them from saved state.
        __props__ = StreamingDistributionArgs.__new__(StreamingDistributionArgs)

        __props__.__dict__["domain_name"] = None
        __props__.__dict__["streaming_distribution_config"] = None
        __props__.__dict__["tags"] = None
        return StreamingDistribution(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Output[str]:
        """Provider-assigned domain name of the distribution (output only)."""
        return pulumi.get(self, "domain_name")

    @property
    @pulumi.getter(name="streamingDistributionConfig")
    def streaming_distribution_config(self) -> pulumi.Output['outputs.StreamingDistributionConfig']:
        """The distribution's configuration, as resolved by the engine."""
        return pulumi.get(self, "streaming_distribution_config")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Sequence['outputs.StreamingDistributionTag']]:
        """Tags applied to the distribution."""
        return pulumi.get(self, "tags")
| 47.236842
| 212
| 0.692061
|
51003e667ed4b3afd9fa7c7fc19d163879d59dbe
| 4,176
|
py
|
Python
|
coremltools/converters/mil/frontend/torch/test/testing_utils.py
|
prakriti07/coremltools
|
82c916d4925ac4dc380e20a595011abb44a35335
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/frontend/torch/test/testing_utils.py
|
prakriti07/coremltools
|
82c916d4925ac4dc380e20a595011abb44a35335
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/mil/frontend/torch/test/testing_utils.py
|
prakriti07/coremltools
|
82c916d4925ac4dc380e20a595011abb44a35335
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
import torch
from six import string_types as _string_types
from coremltools import TensorType
from coremltools.converters import convert
from coremltools.models import MLModel
from coremltools._deps import _IS_MACOS
def _flatten(items):
    """Recursively flatten arbitrarily nested lists/tuples into a flat list.

    Non-list/tuple elements are appended unchanged, preserving order.
    (The original parameter was named ``object``, shadowing the builtin;
    renamed — all in-file callers pass it positionally.)
    """
    flattened_list = []
    for item in items:
        if isinstance(item, (list, tuple)):
            flattened_list.extend(_flatten(item))
        else:
            flattened_list.append(item)
    return flattened_list
def convert_to_coreml_inputs(input_description, inputs):
    """Pair a CoreML model's input descriptions with raw torch inputs.

    Returns a dict mapping each description entry (stringified) to the
    matching tensor converted via ``.numpy()`` — the format expected by
    ``MLModel.predict``. Nested input structures are flattened first.
    """
    coreml_inputs = {}
    for name, tensor in zip(input_description, _flatten(inputs)):
        coreml_inputs[str(name)] = tensor.numpy()
    return coreml_inputs
def convert_to_mlmodel(model_spec, tensor_inputs):
    """Convert a (traced) torch model to an MLModel.

    Input type specs are derived from the example tensors, mirroring any
    list/tuple nesting; a non-tensor leaf is an error.
    """

    def _convert_to_inputtype(value):
        if isinstance(value, list):
            return [_convert_to_inputtype(item) for item in value]
        if isinstance(value, tuple):
            return tuple(_convert_to_inputtype(item) for item in value)
        if isinstance(value, torch.Tensor):
            return TensorType(shape=value.shape)
        raise ValueError(
            "Unable to parse type {} into InputType.".format(type(value))
        )

    return convert(model_spec, inputs=list(_convert_to_inputtype(tensor_inputs)))
def generate_input_data(input_size):
    """Create random test input(s) for the given shape spec.

    A list of shapes yields a list of random tensors (one per shape);
    a single shape yields a single random tensor.
    """
    if not isinstance(input_size, list):
        return torch.rand(input_size)
    return [torch.rand(shape) for shape in input_size]
def trace_model(model, input_data):
    """Put *model* in eval mode and return its ``torch.jit`` trace.

    A list of example inputs is converted to a tuple, which is the form
    ``torch.jit.trace`` expects for multiple arguments.
    """
    model.eval()
    example = tuple(input_data) if isinstance(input_data, list) else input_data
    return torch.jit.trace(model, example)
def run_numerical_test(
    input_data, model, expected_results=None, places=5, input_as_shape=True
):
    """Trace *model* and numerically compare its CoreML conversion.

    Args:
        input_as_shape <bool>: If true, *input_data* is a shape spec and
            random data of that shape is generated.
        expected_results <iterable, optional>: Expected result from running
            the pytorch model; defaults to the traced model's own output.
    """
    model.eval()
    data = generate_input_data(input_data) if input_as_shape else input_data
    traced = trace_model(model, data)
    convert_and_compare(
        data, traced, expected_results=expected_results, atol=10.0 ** -places
    )
def flatten_and_detach_torch_results(torch_results):
    """Detach torch outputs and convert them to numpy arrays.

    Nested list/tuple results are flattened; a bare tensor is wrapped in a
    single-element list so the return type is always a list of arrays.
    """
    if not isinstance(torch_results, (list, tuple)):
        return [torch_results.detach().numpy()]
    return [tensor.detach().numpy() for tensor in _flatten(torch_results)]
def convert_and_compare(input_data, model_spec, expected_results=None, atol=1e-5):
    """
    Convert *model_spec* to CoreML and compare predictions numerically.

    If expected results is not set, it will by default
    be set to the flattened output of the torch model.
    Prediction/comparison only runs on macOS (CoreML runtime requirement).
    """
    # A string model_spec is a path to a saved TorchScript module.
    if isinstance(model_spec, _string_types):
        torch_model = torch.jit.load(model_spec)
    else:
        torch_model = model_spec

    if not isinstance(input_data, (list, tuple)):
        input_data = [input_data]

    if not expected_results:
        expected_results = torch_model(*input_data)
    expected_results = flatten_and_detach_torch_results(expected_results)
    mlmodel = convert_to_mlmodel(model_spec, input_data)
    coreml_inputs = convert_to_coreml_inputs(mlmodel.input_description, input_data)
    if _IS_MACOS:
        coreml_results = mlmodel.predict(coreml_inputs)
        # CoreML returns a dict; ordering by sorted key name is assumed to
        # match the torch output order — NOTE(review): confirm this holds
        # for multi-output models.
        sorted_coreml_results = [
            coreml_results[key] for key in sorted(coreml_results.keys())
        ]

        for torch_result, coreml_result in zip(expected_results, sorted_coreml_results):
            np.testing.assert_equal(coreml_result.shape, torch_result.shape)
            np.testing.assert_allclose(coreml_result, torch_result, atol=atol)
| 34.512397
| 94
| 0.702826
|
e84e362e47b190a8c03f8b56f326101c33393323
| 10,685
|
py
|
Python
|
nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_cachepolicy_binding.py
|
mahabs/nitro
|
be74e1e177f5c205c16126bc9b023f2348788409
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_cachepolicy_binding.py
|
mahabs/nitro
|
be74e1e177f5c205c16126bc9b023f2348788409
|
[
"Apache-2.0"
] | null | null | null |
nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_cachepolicy_binding.py
|
mahabs/nitro
|
be74e1e177f5c205c16126bc9b023f2348788409
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_cachepolicy_binding(base_resource) :
    """ Binding class showing the cachepolicy that can be bound to csvserver.

    Generated NITRO SDK binding resource: raw wire fields are stored with a
    leading underscore and exposed via the properties below.
    """
    def __init__(self) :
        self._policyname = ""
        self._priority = 0
        self._gotopriorityexpression = ""
        self._bindpoint = ""
        self._invoke = False
        self._labeltype = ""
        self._labelname = ""
        self._name = ""
        self._targetlbvserver = ""
        # Triple-underscore name matches the count field the NITRO payload
        # formatter populates for count requests.
        self.___count = 0

    @property
    def priority(self) :
        """Priority for the policy.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @priority.setter
    def priority(self, priority) :
        """Priority for the policy.
        """
        try :
            self._priority = priority
        except Exception as e:
            raise e

    @property
    def bindpoint(self) :
        """The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
        """
        try :
            return self._bindpoint
        except Exception as e:
            raise e

    @bindpoint.setter
    def bindpoint(self, bindpoint) :
        """The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
        """
        try :
            self._bindpoint = bindpoint
        except Exception as e:
            raise e

    @property
    def policyname(self) :
        """Policies bound to this vserver.
        """
        try :
            return self._policyname
        except Exception as e:
            raise e

    @policyname.setter
    def policyname(self, policyname) :
        """Policies bound to this vserver.
        """
        try :
            self._policyname = policyname
        except Exception as e:
            raise e

    @property
    def labelname(self) :
        """Name of the label invoked.
        """
        try :
            return self._labelname
        except Exception as e:
            raise e

    @labelname.setter
    def labelname(self, labelname) :
        """Name of the label invoked.
        """
        try :
            self._labelname = labelname
        except Exception as e:
            raise e

    @property
    def name(self) :
        """Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        """Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def targetlbvserver(self) :
        """Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
        Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
        Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver.
        """
        try :
            return self._targetlbvserver
        except Exception as e:
            raise e

    @targetlbvserver.setter
    def targetlbvserver(self, targetlbvserver) :
        """Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
        Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
        Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver
        """
        try :
            self._targetlbvserver = targetlbvserver
        except Exception as e:
            raise e

    @property
    def gotopriorityexpression(self) :
        """Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
        """
        try :
            return self._gotopriorityexpression
        except Exception as e:
            raise e

    @gotopriorityexpression.setter
    def gotopriorityexpression(self, gotopriorityexpression) :
        """Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
        """
        try :
            self._gotopriorityexpression = gotopriorityexpression
        except Exception as e:
            raise e

    @property
    def invoke(self) :
        """Invoke flag.
        """
        try :
            return self._invoke
        except Exception as e:
            raise e

    @invoke.setter
    def invoke(self, invoke) :
        """Invoke flag.
        """
        try :
            self._invoke = invoke
        except Exception as e:
            raise e

    @property
    def labeltype(self) :
        """The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
        """
        try :
            return self._labeltype
        except Exception as e:
            raise e

    @labeltype.setter
    def labeltype(self, labeltype) :
        """The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel
        """
        try :
            self._labeltype = labeltype
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(csvserver_cachepolicy_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 signals a stale session: drop it before raising.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.csvserver_cachepolicy_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.name) :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        # NITRO models binding-add as an update on the binding resource;
        # accepts a single resource or a list.
        try :
            if resource and type(resource) is not list :
                updateresource = csvserver_cachepolicy_binding()
                updateresource.name = resource.name
                updateresource.policyname = resource.policyname
                updateresource.targetlbvserver = resource.targetlbvserver
                updateresource.gotopriorityexpression = resource.gotopriorityexpression
                updateresource.bindpoint = resource.bindpoint
                updateresource.invoke = resource.invoke
                updateresource.labeltype = resource.labeltype
                updateresource.labelname = resource.labelname
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [csvserver_cachepolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].policyname = resource[i].policyname
                        updateresources[i].targetlbvserver = resource[i].targetlbvserver
                        updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
                        updateresources[i].bindpoint = resource[i].bindpoint
                        updateresources[i].invoke = resource[i].invoke
                        updateresources[i].labeltype = resource[i].labeltype
                        updateresources[i].labelname = resource[i].labelname
                return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        # Unbind: only the identifying fields (name, policyname, bindpoint)
        # are sent; accepts a single resource or a list.
        try :
            if resource and type(resource) is not list :
                deleteresource = csvserver_cachepolicy_binding()
                deleteresource.name = resource.name
                deleteresource.policyname = resource.policyname
                deleteresource.bindpoint = resource.bindpoint
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [csvserver_cachepolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].name = resource[i].name
                        deleteresources[i].policyname = resource[i].policyname
                        deleteresources[i].bindpoint = resource[i].bindpoint
                return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, name) :
        """ Use this API to fetch csvserver_cachepolicy_binding resources.
        """
        try :
            obj = csvserver_cachepolicy_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """ Use this API to fetch filtered set of csvserver_cachepolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = csvserver_cachepolicy_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, name) :
        """ Use this API to count csvserver_cachepolicy_binding resources configued on NetScaler.
        """
        try :
            obj = csvserver_cachepolicy_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """ Use this API to count the filtered set of csvserver_cachepolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = csvserver_cachepolicy_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    # Enumerations of the string values accepted by the corresponding fields.
    class Bindpoint:
        REQUEST = "REQUEST"
        RESPONSE = "RESPONSE"

    class Labeltype:
        reqvserver = "reqvserver"
        resvserver = "resvserver"
        policylabel = "policylabel"
class csvserver_cachepolicy_binding_response(base_response) :
    """Response wrapper for csvserver_cachepolicy_binding requests.

    Holds the NITRO status fields plus a pre-sized list of binding objects
    that the payload formatter fills in.
    """
    def __init__(self, length=1) :
        # NITRO status fields populated from the service reply.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the result list. (The original assigned this attribute
        # twice — an initial dead `= []` immediately overwritten — removed.)
        self.csvserver_cachepolicy_binding = [csvserver_cachepolicy_binding() for _ in range(length)]
| 29.435262
| 131
| 0.724193
|
682250be8a879361de6a36516a6cc2a65dac4b6b
| 13,317
|
py
|
Python
|
deutschland/jobsuche/rest.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
deutschland/jobsuche/rest.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
deutschland/jobsuche/rest.py
|
kiranmusze/deutschland
|
86d8ead3f38ad88ad66bb338b9f5a8db06992344
|
[
"Apache-2.0"
] | null | null | null |
"""
Bundesagentur für Arbeit: Jobsuche API
Die größte Stellendatenbank Deutschlands durchsuchen, Details zu Stellenanzeigen und Informationen über Arbeitgeber abrufen. <br><br> Die Authentifizierung funktioniert per OAuth 2 Client Credentials mit JWTs. Folgende Client-Credentials können dafür verwendet werden:<br><br> **ClientID:** c003a37f-024f-462a-b36d-b001be4cd24a <br> **ClientSecret:** 32a39620-32b3-4307-9aa1-511e3d7f48a8 # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode
import urllib3
from deutschland.jobsuche.exceptions import (
ApiException,
UnauthorizedException,
ForbiddenException,
NotFoundException,
ServiceException,
ApiValueError,
)
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
    """Adapter exposing a urllib3 response's status, reason, body and
    headers through a uniform interface."""

    def __init__(self, resp):
        self.urllib3_response = resp
        # Mirror the commonly accessed fields as plain attributes.
        for field in ("status", "reason", "data"):
            setattr(self, field, getattr(resp, field))

    def getheaders(self):
        """Returns a dictionary of the response headers."""
        return self.urllib3_response.getheaders()

    def getheader(self, name, default=None):
        """Returns a given response header."""
        return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
    """HTTP client wrapper around a urllib3 pool manager.

    Builds a (Proxy)PoolManager from the supplied configuration and exposes
    one method per HTTP verb, all funneling through :meth:`request`.
    """

    def __init__(self, configuration, pools_size=4, maxsize=None):
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501

        # cert_reqs
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE

        # Extra pool kwargs are only passed when explicitly configured.
        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args[
                "assert_hostname"
            ] = configuration.assert_hostname  # noqa: E501

        if configuration.retries is not None:
            addition_pool_args["retries"] = configuration.retries

        if configuration.socket_options is not None:
            addition_pool_args["socket_options"] = configuration.socket_options

        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4

        # https pool manager
        if configuration.proxy:
            self.pool_manager = urllib3.ProxyManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=configuration.ssl_ca_cert,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                proxy_url=configuration.proxy,
                proxy_headers=configuration.proxy_headers,
                **addition_pool_args
            )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=configuration.ssl_ca_cert,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )

    def request(
        self,
        method,
        url,
        query_params=None,
        headers=None,
        body=None,
        post_params=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Perform requests.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencoded`
                            and `multipart/form-data`
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        """
        method = method.upper()
        assert method in ["GET", "HEAD", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"]

        # body (JSON/raw) and post_params (form/multipart) are mutually exclusive.
        if post_params and body:
            raise ApiValueError(
                "body parameter cannot be used with post_params parameter."
            )

        post_params = post_params or {}
        headers = headers or {}

        timeout = None
        if _request_timeout:
            if isinstance(_request_timeout, (int, float)):  # noqa: E501,F821
                timeout = urllib3.Timeout(total=_request_timeout)
            elif isinstance(_request_timeout, tuple) and len(_request_timeout) == 2:
                timeout = urllib3.Timeout(
                    connect=_request_timeout[0], read=_request_timeout[1]
                )

        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ["POST", "PUT", "PATCH", "OPTIONS", "DELETE"]:
                # Only set a default Content-Type for POST, PUT, PATCH and OPTIONS requests
                if (method != "DELETE") and ("Content-Type" not in headers):
                    headers["Content-Type"] = "application/json"
                if query_params:
                    url += "?" + urlencode(query_params)
                # No/JSON Content-Type: serialize body as JSON.
                if ("Content-Type" not in headers) or (
                    re.search("json", headers["Content-Type"], re.IGNORECASE)
                ):
                    request_body = None
                    if body is not None:
                        request_body = json.dumps(body)
                    r = self.pool_manager.request(
                        method,
                        url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers,
                    )
                elif (
                    headers["Content-Type"] == "application/x-www-form-urlencoded"
                ):  # noqa: E501
                    r = self.pool_manager.request(
                        method,
                        url,
                        fields=post_params,
                        encode_multipart=False,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers,
                    )
                elif headers["Content-Type"] == "multipart/form-data":
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers["Content-Type"]
                    r = self.pool_manager.request(
                        method,
                        url,
                        fields=post_params,
                        encode_multipart=True,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers,
                    )
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str) or isinstance(body, bytes):
                    request_body = body
                    r = self.pool_manager.request(
                        method,
                        url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers,
                    )
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(
                    method,
                    url,
                    fields=query_params,
                    preload_content=_preload_content,
                    timeout=timeout,
                    headers=headers,
                )
        except urllib3.exceptions.SSLError as e:
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)

        if _preload_content:
            r = RESTResponse(r)

            # log response body
            logger.debug("response body: %s", r.data)

        # Map HTTP error classes onto the package exception hierarchy.
        if not 200 <= r.status <= 299:
            if r.status == 401:
                raise UnauthorizedException(http_resp=r)

            if r.status == 403:
                raise ForbiddenException(http_resp=r)

            if r.status == 404:
                raise NotFoundException(http_resp=r)

            if 500 <= r.status <= 599:
                raise ServiceException(http_resp=r)

            raise ApiException(http_resp=r)

        return r

    def GET(
        self,
        url,
        headers=None,
        query_params=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Issue a GET request (see :meth:`request`)."""
        return self.request(
            "GET",
            url,
            headers=headers,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            query_params=query_params,
        )

    def HEAD(
        self,
        url,
        headers=None,
        query_params=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Issue a HEAD request (see :meth:`request`)."""
        return self.request(
            "HEAD",
            url,
            headers=headers,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            query_params=query_params,
        )

    def OPTIONS(
        self,
        url,
        headers=None,
        query_params=None,
        post_params=None,
        body=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Issue an OPTIONS request (see :meth:`request`)."""
        return self.request(
            "OPTIONS",
            url,
            headers=headers,
            query_params=query_params,
            post_params=post_params,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            body=body,
        )

    def DELETE(
        self,
        url,
        headers=None,
        query_params=None,
        body=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Issue a DELETE request (see :meth:`request`)."""
        return self.request(
            "DELETE",
            url,
            headers=headers,
            query_params=query_params,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            body=body,
        )

    def POST(
        self,
        url,
        headers=None,
        query_params=None,
        post_params=None,
        body=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Issue a POST request (see :meth:`request`)."""
        return self.request(
            "POST",
            url,
            headers=headers,
            query_params=query_params,
            post_params=post_params,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            body=body,
        )

    def PUT(
        self,
        url,
        headers=None,
        query_params=None,
        post_params=None,
        body=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Issue a PUT request (see :meth:`request`)."""
        return self.request(
            "PUT",
            url,
            headers=headers,
            query_params=query_params,
            post_params=post_params,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            body=body,
        )

    def PATCH(
        self,
        url,
        headers=None,
        query_params=None,
        post_params=None,
        body=None,
        _preload_content=True,
        _request_timeout=None,
    ):
        """Issue a PATCH request (see :meth:`request`)."""
        return self.request(
            "PATCH",
            url,
            headers=headers,
            query_params=query_params,
            post_params=post_params,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            body=body,
        )
| 33.799492
| 405
| 0.539836
|
3dd32e4baab3f18eed768348426cfd80166212a8
| 65
|
py
|
Python
|
hitherdither/ordered/__init__.py
|
bewar313/probable-octo-eureka
|
aa04303d217745e81dc2c3edd3c10212bc298497
|
[
"MIT"
] | 149
|
2016-09-15T08:39:50.000Z
|
2022-03-30T01:52:22.000Z
|
BubbleBoiBot v1.92/hitherdither/ordered/__init__.py
|
TheLoveableBananaNoodle/-PATCHED-BubbleBoiBot
|
2faa7b40b4f8f1104aab336ab556a7a451fcfc4a
|
[
"MIT"
] | 56
|
2019-02-19T01:57:19.000Z
|
2021-06-14T10:10:18.000Z
|
BubbleBoiBot v1.92/hitherdither/ordered/__init__.py
|
TheLoveableBananaNoodle/-PATCHED-BubbleBoiBot
|
2faa7b40b4f8f1104aab336ab556a7a451fcfc4a
|
[
"MIT"
] | 61
|
2019-02-28T18:25:20.000Z
|
2022-01-21T20:03:44.000Z
|
from . import bayer
from . import yliluoma
from . import cluster
| 16.25
| 22
| 0.769231
|
8e1f2a59fa469c7b47d7d7f118ae23acc54cd660
| 1,857
|
py
|
Python
|
venv/lib/python3.6/site-packages/pylint/test/functional/line_too_long.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/pylint/test/functional/line_too_long.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | 1
|
2021-06-01T23:32:38.000Z
|
2021-06-01T23:32:38.000Z
|
venv/lib/python3.6/site-packages/pylint/test/functional/line_too_long.py
|
aitoehigie/britecore_flask
|
eef1873dbe6b2cc21f770bc6dec783007ae4493b
|
[
"MIT"
] | null | null | null |
# pylint: disable=invalid-encoded-data, fixme, unnecessary-pass
# +1: [line-too-long]
#####################################################################################################
# +1: [line-too-long]
""" that one is too long tooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo loooooong"""
# The next line is exactly 80 characters long.
A = "--------------------------------------------------------------------------"
# Do not trigger the line-too-long warning if the only token that makes the
# line longer than 80 characters is a trailing pylint disable.
# pylint:disable=invalid-name
var = "This line has a disable pragma and whitespace trailing beyond 80 chars. "
# +1: [line-too-long]
badname = "This line is already longer than 100 characters even without the pragma. Trust me. Please." # pylint:disable=invalid-name
# http://example.com/this/is/a/very/long/url?but=splitting&urls=is&a=pain&so=they&can=be&long
# +1: [line-too-long]
# This line is toooooooooooooooooooooooooooooooooooooooooooooooo looooooooooooooooooooooooooooooooooooooooong #pylint: disable=fixme
# +1: [line-too-long]
# TODO: This line is toooooooooooooooooooooooooooooooooooooooooooooooo looooooooooooooooooooooooooooooooooooooooong #pylint: disable=fixme
def function():
    # +3: [line-too-long]
    """This is a docstring.
    That contains a very, very long line that exceeds the 100 characters limit by a good margin. So good?
    """
    pass  # NOTE(review): pylint functional-test fixture; markers/line lengths are intentional
# Don't crash when the line is in a docstring
def func_with_long(parameter):
    """
    # pylint: disable=line-too-long
    aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccc
    """
    return parameter  # NOTE(review): fixture — the over-long docstring line is the test data
| 42.204545
| 200
| 0.684437
|
baaea93049af0130ece395254654e8970568b80f
| 4,684
|
py
|
Python
|
config.py
|
FelixFu520/PANet
|
26b9efcb7b8c44a39a32bcd9d2fa1f4c0cd9ac6e
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
FelixFu520/PANet
|
26b9efcb7b8c44a39a32bcd9d2fa1f4c0cd9ac6e
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
FelixFu520/PANet
|
26b9efcb7b8c44a39a32bcd9d2fa1f4c0cd9ac6e
|
[
"Apache-2.0"
] | null | null | null |
"""Experiment Configuration"""
import os
import re
import glob
import itertools
import sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
# Allow config entries to be mutated after creation and disable Sacred's
# stdout/stderr capturing.
sacred.SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False
sacred.SETTINGS.CAPTURE_MODE = 'no'
ex = Experiment('PANet')
ex.captured_out_filter = apply_backspaces_and_linefeeds
# Record a snapshot of every project .py file with each Sacred run.
source_folders = ['.', './dataloaders', './models', './util']
sources_to_save = list(itertools.chain.from_iterable(
    [glob.glob(f'{folder}/*.py') for folder in source_folders]))
for source_file in sources_to_save:
    ex.add_source_file(source_file)
@ex.config
def cfg():
    """Default configurations.

    NOTE: Sacred turns every local variable defined in this config scope into
    a configuration entry, so the variable names here are the config keys —
    do not rename them.
    """
    input_size = (417, 417)  # network input size (H, W)
    seed = 1234  # RNG seed
    cuda_visable = '0, 1'  # presumably a CUDA_VISIBLE_DEVICES value — confirm at usage site
    gpu_id = 0  # GPU index used for this run
    mode = 'test'  # 'train' or 'test'
    encoder = 'vgg'  # backbone: vgg, resnet18
    # Dataset selection: per-channel normalization mean/std for each dataset.
    # (Identical values across channels suggest grayscale imagery — confirm.)
    # industry1_7
    # scaler = 7 # industry1_7
    # mean = [0.43945648781646385, 0.43945648781646385, 0.43945648781646385]
    # std = [0.2168534245469423, 0.2168534245469423, 0.2168534245469423]
    # # industry9_17
    # scaler = 9 # industry9_17
    # mean = [0.2550296947687293, 0.2550296947687293, 0.2550296947687293]
    # std = [0.16025225200718504, 0.16025225200718504, 0.16025225200718504]
    # # industry1_20
    # scaler = 20
    # mean = [0.33195017387549947, 0.33195017387549947, 0.33195017387549947]
    # std = [0.2418630842093115, 0.2418630842093115, 0.2418630842093115]
    # industry9_19 (currently active)
    scaler = 11
    mean = [0.29669840400994085, 0.29669840400994085, 0.29669840400994085]
    std = [0.22134883075160208, 0.22134883075160208, 0.22134883075160208]
    # Filesystem locations: log dir, pretrained backbone weights, datasets.
    path = {
        'log_dir': './runs',
        'init_path': './pretrained_model/vgg16-397923af.pth',
        'VOC': {'data_dir': '../../data/fsl/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/',
                'data_split': 'trainaug', },
        'COCO': {'data_dir': '../../data/fsl/COCO/',
                 'data_split': 'train', },
        'INDUSTRY': {'data_dir': '../../data/fsl/industry/industry9_19',
                     'data_split': 'train', },
    }
    if mode == 'train':
        dataset = 'VOC'  # 'VOC','COCO', 'INDUSTRY'
        n_steps = 40000  # total training iterations
        label_sets = 0  # index of the label split to use
        batch_size = 1
        lr_milestones = [10000, 20000, 30000, 40000]  # LR decay milestones
        align_loss_scaler = 1  # weight of the alignment loss term
        ignore_label = 255  # label value excluded from the loss
        print_interval = 100  # iterations between log lines
        save_pred_every = 10000  # iterations between checkpoints
        model = {
            'align': True,  # enable the alignment branch
        }
        # Few-shot episode layout.
        task = {
            'n_ways': 1,
            'n_shots': 1,
            'n_queries': 1,
        }
        # SGD hyper-parameters.
        optim = {
            'lr': 1e-3,
            'momentum': 0.9,
            'weight_decay': 0.0005,
        }
    elif mode == 'test':
        notrain = False  # if True, evaluate without loaded training weights
        snapshot = './runs/PANet_VOC_sets_0_1way_1shot_[train]/1/snapshots/30000.pth'
        n_runs = 1
        n_steps = 100
        batch_size = 1
        scribble_dilation = 0
        bbox = False  # use bounding-box supports
        scribble = False  # use scribble supports
        # Set dataset config from the snapshot string
        if 'VOC' in snapshot:
            dataset = 'VOC'
        elif 'COCO' in snapshot:
            dataset = 'COCO'
        elif 'INDUSTRY' in snapshot:
            dataset = 'INDUSTRY'
        else:
            raise ValueError('Wrong snapshot name !')
        # Set model config from the snapshot string
        model = {}
        for key in ['align',]:
            model[key] = key in snapshot
        # Set label_sets from the snapshot string (digit after '_sets_')
        label_sets = int(snapshot.split('_sets_')[1][0])
        # Set task config (ways/shots) from the snapshot string
        task = {
            'n_ways': int(re.search("[0-9]+way", snapshot).group(0)[:-3]),
            'n_shots': int(re.search("[0-9]+shot", snapshot).group(0)[:-4]),
            'n_queries': 1,
        }
    else:
        raise ValueError('Wrong configuration for "mode" !')
    # Human-readable experiment tag, e.g. 'VOC_align_sets_0_1way_1shot_[test]'.
    exp_str = '_'.join(
        [dataset,]
        + [key for key, value in model.items() if value]
        + [f'sets_{label_sets}', f'{task["n_ways"]}way_{task["n_shots"]}shot_[{mode}]'])
@ex.config_hook
def add_observer(config, command_name, logger):
    """Config hook: attach a FileStorageObserver writing under the log dir.

    The run directory name is the experiment tag, extended with flag suffixes
    in test mode (notrain / scribble / bbox, in that order).
    """
    name_parts = [f'{ex.path}_{config["exp_str"]}']
    if config['mode'] == 'test':
        for flag in ('notrain', 'scribble', 'bbox'):
            if config[flag]:
                name_parts.append(f'_{flag}')
    exp_name = ''.join(name_parts)
    run_dir = os.path.join(config['path']['log_dir'], exp_name)
    ex.observers.append(FileStorageObserver.create(run_dir))
    return config
| 31.226667
| 118
| 0.591375
|
5c5c1b06545353ed38f6f555c9813737843e4270
| 986
|
py
|
Python
|
test/distributed/rpc/test_faulty_agent.py
|
jamestwebber/pytorch
|
cac9ae1506feabfc87d37a208b3d39ed46c59483
|
[
"Intel"
] | 7
|
2021-05-29T16:31:51.000Z
|
2022-02-21T18:52:25.000Z
|
test/distributed/rpc/test_faulty_agent.py
|
jamestwebber/pytorch
|
cac9ae1506feabfc87d37a208b3d39ed46c59483
|
[
"Intel"
] | 1
|
2021-03-25T13:42:15.000Z
|
2021-03-25T13:42:15.000Z
|
test/distributed/rpc/test_faulty_agent.py
|
jamestwebber/pytorch
|
cac9ae1506feabfc87d37a208b3d39ed46c59483
|
[
"Intel"
] | 1
|
2021-12-26T23:20:06.000Z
|
2021-12-26T23:20:06.000Z
|
#!/usr/bin/env python3
import sys

import torch
import torch.distributed as dist

# Exit successfully (skip) on PyTorch builds without distributed support;
# the imports below would fail otherwise.
if not dist.is_available():
    print("Distributed not available, skipping tests", file=sys.stderr)
    sys.exit(0)

from torch.testing._internal.common_utils import IS_PYTORCH_CI, run_tests
from torch.testing._internal.distributed.rpc.faulty_rpc_agent_test_fixture import (
    FaultyRpcAgentTestFixture,
)
from torch.testing._internal.distributed.rpc_utils import (
    FAULTY_AGENT_TESTS,
    MultiProcess,
    generate_tests,
)

# On CircleCI these tests are already run on CPU jobs, thus to save resources do
# not run them on GPU jobs, since they wouldn't provide additional test signal.
if not (IS_PYTORCH_CI and torch.cuda.is_available()):
    # Inject the generated "Faulty*" test classes into this module's globals
    # so the test runner can discover them by name.
    globals().update(
        generate_tests(
            "Faulty",
            FaultyRpcAgentTestFixture,
            FAULTY_AGENT_TESTS,
            MultiProcess.SPAWN,
            __name__,
        )
    )

if __name__ == "__main__":
    run_tests()
| 25.282051
| 83
| 0.710953
|
92579d391f6ab8bf5ced83ba5b517b9b38f426cd
| 2,752
|
py
|
Python
|
specrel/graphics/__init__.py
|
johanngan/special_relativity
|
cd372c7460d2c0d4040c81bc1bd0090086dba735
|
[
"MIT"
] | 4
|
2020-08-19T04:56:40.000Z
|
2022-02-07T22:09:45.000Z
|
specrel/graphics/__init__.py
|
johanngan/special_relativity
|
cd372c7460d2c0d4040c81bc1bd0090086dba735
|
[
"MIT"
] | null | null | null |
specrel/graphics/__init__.py
|
johanngan/special_relativity
|
cd372c7460d2c0d4040c81bc1bd0090086dba735
|
[
"MIT"
] | null | null | null |
"""Core graphics-generation code for spacetime plots."""
graphrc = {
'fig': None,
'ax': None,
'axs': None,
'grid': False,
'legend': False,
'legend_loc': 'best',
'title': None,
'tlabel': 'Time (seconds)',
'xlabel': 'Position (light-seconds)',
'worldline.lim_padding': 0.1,
'worldline.equal_lim_expand': 1,
'anim.fps': 50,
'anim.display_current': True,
'anim.display_current_decimals': 3,
'anim.time.ct_per_sec': 1,
'anim.time.instant_pause_time': 1,
'anim.transform.time': 0,
'anim.transform.transition_duration': 2,
'anim.worldline.current_time_style': '--',
'anim.worldline.current_time_color': 'red',
}
"""Default parameters shared by various classes and functions in
`specrel.graphics`.
## Items
#### Top-level
- **fig**: `None`
- Matplotlib figure to draw on.
- **ax**: `None`
- Single Matplotlib axis to draw on.
- **axs**: `None`
- List of Matplotlib axes to draw on.
- **grid**: `False`
- Flag for whether or not to plot background grid lines.
- **legend**: False
- Flag for whether or not to plot a legend.
- **legend_loc**: 'best'
- Legend location according to the Matplotlib `loc` parameter.
- **title**: None
- Plot title.
- **tlabel**: 'Time (seconds)'
- Plot y-axis label (corresponding to the time axis).
- **xlabel**: 'Position (light-seconds)'
- Plot x-axis label (corresponding to the position axis).
#### Worldline
- **worldline.lim_padding**: 0.1
- Extra padding on spacetime diagram axis limits, relative to the axis
sizes.
- **worldline.equal_lim_expand**: 1
- If the limits on an axis are specified to be equal, they will be expanded
symmetrically until the axis size is this value.
#### Animators
- **anim.fps**: 50
- Animation frames per second.
- **anim.display_current**: True
- Flag for displaying the current "control value" (e.g. time or velocity)
in each animation frame.
- **anim.display_current_decimals**: 3
- Number of decimals to display the current control value to.
- **anim.time.ct_per_sec**: 1
- Amount of time to pass within an animation for every second of real time.
- **anim.time.instant_pause_time**: 1
- Amount of animation pause time in seconds for instantaneous events.
- **anim.transform.time**: 0
- Time value to fix while animating a Lorentz transformation.
- **anim.transform.transition_duration**: 2
- Real-time duration in seconds of a Lorentz transform animation.
- **anim.worldline.current_time_style**: '--'
- Matplotlib linestyle for the line of current time in animated spacetime plot.
- **anim.worldline.current_time_color**: 'red'
- Matplotlib color for the line of current time in animated spacetime plot.
"""
| 36.210526
| 79
| 0.672238
|
26b957feb52a235c59a9d66dc3c3620a95e456ef
| 6,613
|
py
|
Python
|
display-stuff/OLEDdisplay/demo_oled.py
|
flashypepo/myMicropython-Examples
|
b2b63df865b5ad471b351ca5f279135025859f5d
|
[
"MIT"
] | 3
|
2017-09-03T17:17:44.000Z
|
2017-12-10T12:26:46.000Z
|
display-stuff/OLEDdisplay/demo_oled.py
|
flashypepo/myMicropython-Examples
|
b2b63df865b5ad471b351ca5f279135025859f5d
|
[
"MIT"
] | null | null | null |
display-stuff/OLEDdisplay/demo_oled.py
|
flashypepo/myMicropython-Examples
|
b2b63df865b5ad471b351ca5f279135025859f5d
|
[
"MIT"
] | 2
|
2017-10-01T01:10:55.000Z
|
2018-07-15T19:49:29.000Z
|
# Example of WeMOS Mini + OLED startup file in MicroPython
# Arduino style: setup and loop
#
# History:
# 2017_0715 PePo updated LoLin-ESP32-OLED - SDA/SCL, _DISPLAY_HEIGHT
# 2017-0708 PePo updated for NodeMCU, use 'import machine'
# 2017_0625 PePo updated for WeMOS mini + OLED-display (MicroPython 1.9.1)
# 2017-0101 PePo I2C constructor is changed (MicroPython v1.8.6-273-g5efd650 on 2017-01-01)
# 2016-1023 PePo added scroller()
# 2016-1022 PePo new
#
# Pre-conditions
# * micropython v1.8.5 en hoger, het moet module importeren ondersteunen
# * ssd1306.mpy op de ESP8266
# Bron: https://learn.adafruit.com/micropython-hardware-ssd1306-oled-display/
# function scroller(i2c): SSD1306 Sine Wave Message scroller
# This animates a message scrolling across a SSD1306 display in a sine wave.
# Updated 2016_1023 PePo: dimension WeMOS OLED shield, ESP8266
# Author: Tony DiCola
# License: Public Domain
########################################################################
# Setup code goes below, this is called once at the start of the program: #
########################################################################
#2017_0101: import machine
#from machine import Pin, I2C
import machine
import ssd1306
import time
import math
# 2017_0422 PePo: dimension I2C OLED shield
# 2017_0625 PePo optimized
# NOTE: const() is the MicroPython compile-time constant builtin.
_DISPLAY_WIDTH = const(128) # Width of display in pixels.
_DISPLAY_HEIGHT = const(64) # LoLin-ESP32-OLED height of display.
#_DISPLAY_HEIGHT = const(32) # Height of display in pixels.
# 2016_1023 PePo: dimension WeMOS OLED shield
#_DISPLAY_WIDTH = const(64) # Width of display in pixels.
#_DISPLAY_HEIGHT = const(48) # Height of display in pixels.
# setup WeMOS OLED shield for I2C
# 2017_0101 micropython v1.8.6 version 2017_0101
# apparently, I2C constructor is changed.
# 2017-0708 i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4)) '''
''' 2017-0708: SCL/SDA port on NodeMCU:
SCL = 5
SDA = 4
'''
''' SCL/SDA port on LoLin32:
SCL = 22
SDA = 21
'''
#''' 2017-0715: SCL/SDA port on LoLin-ESP32-OLED:
SCL = 4
SDA = 5
#'''
# Create the I2C bus and display driver, then start from a blank screen.
i2c = machine.I2C(scl=machine.Pin(SCL), sda=machine.Pin(SDA))
#i2c = I2C(scl=Pin(SCL), sda=Pin(SDA), freq=100000)
#i2c.scan() #[60]
oled = ssd1306.SSD1306_I2C(_DISPLAY_WIDTH, _DISPLAY_HEIGHT, i2c)
oled.fill(0) # blank oled
oled.show()
def runningcount():
    """Show a numbered line per 8-pixel text row, one per second, then clear."""
    total_rows = _DISPLAY_HEIGHT // 8
    row = 0
    while row < total_rows:
        oled.text('regel: {0}'.format(row), 0, row * 8)
        oled.show()
        time.sleep(1)  # one second per row
        row += 1
    # finish with an empty screen
    oled.fill(0)
    oled.show()
def showmessage(msg):
    """Display the first two entries of *msg* and light one pixel per corner."""
    for line_no, y in ((0, 10), (1, 20)):
        oled.text(msg[line_no], 10, y)
    right = _DISPLAY_WIDTH - 1
    bottom = _DISPLAY_HEIGHT - 1
    # corner markers, same order as the original implementation
    oled.pixel(0, 0, 1)
    oled.pixel(right, bottom, 1)
    oled.pixel(right, 0, 1)
    oled.pixel(0, bottom, 1)
    oled.show()
# Configure message that will scroll.
#MESSAGE = 'Hello world this is a fun scroller!'
#MESSAGE = 'MicroPython funny scroller!'
#MESSAGE = 'Welkom Bert ------'
#MESSAGE = 'Welkom Peter..........'
# NOTE(review): 'MicropPython' typo is in the displayed text itself; left as-is
# here because this constant's value is runtime output.
MESSAGE = 'MicropPython Rocks! '
# Other configuration:
#TODO: PePo optimization
FONT_WIDTH = 8 # Width of font characters in pixels.
FONT_HEIGHT = 8 # Height of the font characters in pixels.
#ORG: AMPLITUDE = 0.3*(_DISPLAY_HEIGHT - FONT_HEIGHT) # Amplitude of sine wave, in pixels.
AMPLITUDE = 0.5*(_DISPLAY_HEIGHT - FONT_HEIGHT) # Amplitude of sine wave, in pixels.
FREQUENCY = 2 #5 Sine wave frequency, how often it repeats across screen.
OFFSET_Y = int(0.5*_DISPLAY_HEIGHT)-FONT_HEIGHT # vertical offset so the wave is centered
def scroller(i2c, message):
    """Scroll *message* across the display along a sine wave. Never returns."""
    display = ssd1306.SSD1306_I2C(_DISPLAY_WIDTH, _DISPLAY_HEIGHT, i2c)
    msg_width_px = len(message) * FONT_WIDTH
    # Precompute one wavy Y offset per drawable column, including the
    # FONT_WIDTH columns just off-screen on the left, so the main loop only
    # does table lookups.
    half_amp = AMPLITUDE / 2.0
    wave = [0] * (_DISPLAY_WIDTH + FONT_WIDTH)
    for col in range(len(wave)):
        phase = col / (_DISPLAY_WIDTH - 1)  # 0..~1 across the table
        wave[col] = int((half_amp * math.sin(2.0 * math.pi * FREQUENCY * phase)) + half_amp)
    x_start = _DISPLAY_WIDTH  # X of the message's first character
    while True:
        display.fill(0)
        # shift left one pixel; wrap around once fully scrolled off-screen
        x_start -= 1
        if x_start <= -msg_width_px:
            x_start = _DISPLAY_WIDTH
        for idx, char in enumerate(message):
            char_x = x_start + (idx * FONT_WIDTH)
            if -FONT_WIDTH <= char_x < _DISPLAY_WIDTH:
                # visible (possibly partially): draw at its wavy Y position
                display.text(char, char_x, OFFSET_Y + wave[char_x + FONT_WIDTH])
        display.show()
# 1. show a running per-line counter
runningcount()
time.sleep(1) # wait
oled.fill(0) # blank screen
# 2. show static text with pixels in corners
showmessage(["Welkom", "Peter"])
time.sleep(2) # wait
# 3. specify demo function
def demo(i2c, message):
    """Run the scroller until interrupted (Ctrl-C), then flash and clear."""
    try:
        scroller(i2c, message)
    except KeyboardInterrupt:
        # brief all-on flash as a shutdown cue, then blank the display
        oled.fill(1)
        oled.show()
        time.sleep(1.0)
        oled.fill(0)
        oled.show()
        print('Done!')
###################################################################
# Loop code goes inside the loop here, this is called repeatedly: #
###################################################################
# run — demo() blocks until Ctrl-C, after which the CPU clock is restored
machine.freq(160000000) # high speed CPU
demo(i2c, 'Micropython rocks!!')
machine.freq(80000000) # nominal speed CPU
print(machine.freq())
# print('usage: scroller(i2c)')
#showmessage(['usage:', 'scroller(i2c)'])
| 38.672515
| 98
| 0.647059
|
0cd07ed6e6cd77882c9740905d38a6f836f3a4b7
| 49,345
|
py
|
Python
|
src/transformers/models/fsmt/modeling_fsmt.py
|
ddkalamk/transformers
|
ae333d04b29a25be1a70eaccd6260c294c243c5b
|
[
"Apache-2.0"
] | 1
|
2020-12-31T16:39:09.000Z
|
2020-12-31T16:39:09.000Z
|
src/transformers/models/fsmt/modeling_fsmt.py
|
ddkalamk/transformers
|
ae333d04b29a25be1a70eaccd6260c294c243c5b
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/fsmt/modeling_fsmt.py
|
ddkalamk/transformers
|
ae333d04b29a25be1a70eaccd6260c294c243c5b
|
[
"Apache-2.0"
] | 1
|
2021-11-01T20:42:12.000Z
|
2021-11-01T20:42:12.000Z
|
# coding=utf-8
# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19
# Authors:
# - @alexeib Alexei Baevski
# - @edunov Sergey Edunov
# - @michaelauli Michael Auli
# - @myleott Myle Ott
# - @nng555 Nathan Ng
# - David Grangier
# - Kyra Yee
#
# Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616
#
"""PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19"""
import math
import random
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_fsmt import FSMTConfig
logger = logging.get_logger(__name__)

# Names substituted into the shared docstring templates below.
_CONFIG_FOR_DOC = "FSMTConfig"
_TOKENIZER_FOR_DOC = "FSMTTokenizer"
# See all FSMT models at https://huggingface.co/models?filter=fsmt
# Porting notes:
# this one is modeled after BartModel*
#
# Currently only translation (fairseq also has weights for LM)
#
# fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported.
# - ru-en, en-ru use asymmetric vocab
# - de-en, en-de use a merged single vocab (but the code works as if they are separate)
#
# Differences with Bart:
# - not using bos token
# - 2 separate vocabs (src and target)
# - embed weights aren't tied
# - uses a model Ensemble (but that part isn't ported/implemented yet) - so we
# aren't getting as good of a BLEU score
# - uses a projection layer at the end of the decoder
# - doesn't use final_logits_bias
# - beam search: stops as soon as num_beams == len(hypos) (whereas transformers
# is not satisfied there and will continue searching until the next cycles
# aren't promising something better), comparing BLEU scores - the transformers
# algorithm is slightly superior, therefore using the latter. But if you want
# to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()``.
#
# SinusoidalPositionalEmbedding is slightly different from Bart's - generates
# different embeddings. This implementation is copied verbatim from fairseq with
# some small changes to make it work here.
#
# Other changes:
# - doesn't support use_cache as Bart's version does
#
#
# FSMTConfig changes with BartConfig
#
# Differences with BART:
# - src/tgt vocabs aren't shared
# - token embeddings aren't shared
# - needs a language pair
# - scale_embedding are True
#
# some unused args were removed too
#
#
# TODO:
# - port model ensemble (fs uses 4 model checkpoints)
# - solve beam search discrepancies
# docstyle-ignore
"""
Here is how to compare BLEU scores against fairseq implementation:
# en-ru
export PAIR=en-ru
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=50
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
# (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605)
# ru-en
export PAIR=ru-en
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=50
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
# (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937)
# de-en
export PAIR=de-en
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=50
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
# (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750)
# en-de
export PAIR=en-de
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
# (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862)
"""
FSMT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.FSMTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
FSMT_GENERATION_EXAMPLE = r"""
Translation example::
from transformers import FSMTTokenizer, FSMTForConditionalGeneration
mname = "facebook/wmt19-ru-en"
model = FSMTForConditionalGeneration.from_pretrained(mname)
tokenizer = FSMTTokenizer.from_pretrained(mname)
src_text = "Машинное обучение - это здорово, не так ли?"
input_ids = tokenizer.encode(src_text, return_tensors='pt')
outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3)
for i, output in enumerate(outputs):
decoded = tokenizer.decode(output, skip_special_tokens=True)
print(f"{i}: {decoded})
# 1: Machine learning is great, isn't it? ...
"""
FSMT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using :class:`~transformers.FSMTTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for translation and summarization training. By default, the model will create this tensor by
shifting the input_ids right, following the paper.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default. If you want to change padding behavior, you should read
            :func:`modeling_fsmt._prepare_fsmt_decoder_inputs` and modify. See diagram 1 in the paper for more info on
the default strategy
encoder_outputs (:obj:`Tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a
sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (:obj:`Tuple(torch.FloatTensor)` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
# Prefer NVIDIA Apex's fused LayerNorm kernel when Apex is installed;
# otherwise fall back to the stock PyTorch implementation.
have_fused_layer_norm = False
try:
    from apex.normalization import FusedLayerNorm

    have_fused_layer_norm = True
except ImportError:
    pass

LayerNorm = FusedLayerNorm if have_fused_layer_norm else torch.nn.LayerNorm
def invert_mask(attention_mask):
    """Flip a binary 2-D mask: 1 -> 0, 0 -> 1 (True -> False, False -> True)."""
    assert attention_mask.dim() == 2
    return attention_mask == 0
def _prepare_fsmt_decoder_inputs(
    config, input_ids, decoder_input_ids=None, decoder_padding_mask=None, causal_mask_dtype=torch.float32
):
    """
    Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided.
    This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during
    generation
    """
    pad_token_id = config.pad_token_id
    if decoder_input_ids is None:
        # Default teacher forcing: feed the target sequence shifted one right.
        decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
    bsz, tgt_len = decoder_input_ids.size()
    if decoder_padding_mask is None:
        # True at pad positions, or None when there is no padding at all.
        decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
    else:
        # Caller supplied a 1-keep/0-ignore attention mask; flip to pad-mask form.
        decoder_padding_mask = invert_mask(decoder_padding_mask)
    # Upper-triangular -inf mask that blocks attention to future positions.
    # (`fill_with_neg_inf` is defined elsewhere in this module.)
    causal_mask = torch.triu(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len)), 1).to(
        dtype=causal_mask_dtype, device=decoder_input_ids.device
    )
    return decoder_input_ids, decoder_padding_mask, causal_mask
class PretrainedFSMTModel(PreTrainedModel):
    """Base class hooking FSMT models into the transformers load/save machinery."""

    config_class = FSMTConfig
    base_model_prefix = "model"

    def _init_weights(self, module):
        """Initialize weights with N(0, config.init_std); zero biases and pad rows."""
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, SinusoidalPositionalEmbedding):
            # Sinusoidal position embeddings are not trained — leave untouched.
            pass
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # keep the padding embedding at zero
                module.weight.data[module.padding_idx].zero_()

    @property
    def dummy_inputs(self):
        """Small fixed input batch used by the base class (e.g. for tracing)."""
        pad_token = self.config.pad_token_id
        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
        dummy_inputs = {
            "attention_mask": input_ids.ne(pad_token),
            "input_ids": input_ids,
        }
        return dummy_inputs
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
# Helper Functions, mostly for making masks
def _check_shapes(shape_1, shape2):
if shape_1 != shape2:
raise AssertionError("shape mismatch: {} != {}".format(shape_1, shape2))
def shift_tokens_right(input_ids, pad_token_id):
    """Shift input ids one token to the right, wrapping each row's last non-pad token (usually <eos>) to position 0."""
    # Position of the final non-pad token (the EOS) in every row.
    non_pad_counts = input_ids.ne(pad_token_id).sum(dim=1)
    eos_positions = (non_pad_counts - 1).unsqueeze(-1)
    eos_tokens = input_ids.gather(1, eos_positions).squeeze()
    shifted = input_ids.clone()
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = eos_tokens
    return shifted
def make_padding_mask(input_ids, padding_idx=1):
    """Return a mask that is True at pad positions, or None if nothing is padded."""
    mask = input_ids.eq(padding_idx)
    return mask if mask.any() else None
# Helper Modules
class EncoderLayer(nn.Module):
    """One FSMT encoder layer: self-attention then feed-forward, each with a
    residual connection followed by LayerNorm (post-LN, as in fairseq)."""

    def __init__(self, config: FSMTConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        # position-wise feed-forward: d_model -> ffn_dim -> d_model
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def forward(self, x, encoder_padding_mask, output_attentions=False):
        """
        Args:
            x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
            encoder_padding_mask (ByteTensor): binary ByteTensor of shape
                `(batch, src_len)` where padding elements are indicated by ``1``.
                for t_tgt, t_src is excluded (or masked out), =0 means it is
                included in attention

        Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
        """
        # Self-attention sub-block (residual, then post-LayerNorm).
        residual = x
        x, attn_weights = self.self_attn(
            query=x, key=x, key_padding_mask=encoder_padding_mask, output_attentions=output_attentions
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.self_attn_layer_norm(x)

        # Feed-forward sub-block (residual, then post-LayerNorm).
        residual = x
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.final_layer_norm(x)
        return x, attn_weights
class FSMTEncoder(nn.Module):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    :class:`EncoderLayer`.
    Args:
        config: FSMTConfig
    """

    def __init__(self, config: FSMTConfig, embed_tokens):
        super().__init__()
        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.embed_tokens = embed_tokens
        embed_dim = embed_tokens.embedding_dim
        # scale token embeddings by sqrt(d_model) when configured
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.embed_positions = SinusoidalPositionalEmbedding(
            config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
        )
        self.layers = nn.ModuleList(
            [EncoderLayer(config) for _ in range(config.encoder_layers)]
        )  # type: List[EncoderLayer]

    def forward(
        self, input_ids, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True
    ):
        """
        Args:
            input_ids (LongTensor): tokens in the source language of shape
                `(batch, src_len)`
            attention_mask (torch.LongTensor): indicating which indices are padding tokens
        Returns:
            BaseModelOutput or Tuple comprised of:
                - **x** (Tensor): the last encoder layer's output of shape `(src_len, batch, embed_dim)`
                - **encoder_states** (tuple(torch.FloatTensor)): all intermediate hidden states of shape `(src_len,
                  batch, embed_dim)`. Only populated if *output_hidden_states:* is True.
                - **all_attentions** (tuple(torch.FloatTensor)): Attention weights for each layer.
                  During training might not be of length n_layers because of layer dropout.
        """
        # check attention mask and invert
        if attention_mask is not None:
            attention_mask = invert_mask(attention_mask)

        inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        embed_pos = self.embed_positions(input_ids)
        x = inputs_embeds + embed_pos
        x = F.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for encoder_layer in self.layers:
            if output_hidden_states:
                # hidden states are reported in (batch, time, channel) layout
                x = x.transpose(0, 1)  # T x B x C -> B x T x C
                encoder_states += (x,)
                x = x.transpose(0, 1)  # B x T x C -> T x B x C
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):  # skip the layer
                # NOTE(review): a dropped layer stores None as its attention entry
                attn = None
            else:
                x, attn = encoder_layer(x, attention_mask, output_attentions=output_attentions)

            if output_attentions:
                all_attentions = all_attentions + (attn,)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        if output_hidden_states:
            encoder_states += (x,)

        if not return_dict:
            return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
class DecoderLayer(nn.Module):
    """One FSMT decoder block: self-attention, cross-attention over the
    encoder output, then a feed-forward network — each sub-block wrapped in a
    residual connection and post-LN LayerNorm."""

    def __init__(self, config: FSMTConfig):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = Attention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        # cross-attention: keys/values come from the encoder output
        self.encoder_attn = Attention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            encoder_decoder_attention=True,
        )
        self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = LayerNorm(self.embed_dim)

    def forward(
        self,
        x,
        encoder_hidden_states,
        encoder_attn_mask=None,
        layer_state=None,
        causal_mask=None,
        decoder_padding_mask=None,
        output_attentions=False,
    ):
        """Run one decoder block.

        Args:
            x: decoder hidden states of shape `(seq_len, batch, embed_dim)`.
            encoder_hidden_states: encoder output used as cross-attn keys/values.
            encoder_attn_mask: padding mask for the encoder states.
            layer_state: per-layer attention cache dict; mutated in place.
            causal_mask: additive mask blocking attention to future positions.
            decoder_padding_mask: padding mask for the decoder input.
            output_attentions: whether to also return attention weights.

        Returns:
            (hidden states, self-attn weights, layer cache, cross-attn weights).
        """
        residual = x

        if layer_state is None:
            layer_state = {}

        # Self Attention
        x, self_attn_weights = self.self_attn(
            query=x,
            key=x,
            layer_state=layer_state,  # adds keys to layer state
            key_padding_mask=decoder_padding_mask,
            attn_mask=causal_mask,
            output_attentions=output_attentions,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.self_attn_layer_norm(x)

        # Cross attention
        residual = x
        # self- and cross-attention must use distinct slots in layer_state
        assert self.encoder_attn.cache_key != self.self_attn.cache_key
        x, cross_attn_weights = self.encoder_attn(
            query=x,
            key=encoder_hidden_states,
            key_padding_mask=encoder_attn_mask,
            layer_state=layer_state,  # mutates layer state
            output_attentions=output_attentions,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.encoder_attn_layer_norm(x)

        # Fully Connected
        residual = x
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.final_layer_norm(x)
        return (
            x,
            self_attn_weights,
            layer_state,
            cross_attn_weights,
        )  # layer_state = cache for decoding
class FSMTDecoder(nn.Module):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`DecoderLayer`
    Args:
        config: FSMTConfig
        embed_tokens (torch.nn.Embedding): output embedding
    """

    def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding):
        super().__init__()
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = embed_tokens.padding_idx
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
        self.embed_tokens = embed_tokens
        embed_dim = embed_tokens.embedding_dim
        self.embed_positions = SinusoidalPositionalEmbedding(
            config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
        )
        self.layers = nn.ModuleList(
            [DecoderLayer(config) for _ in range(config.decoder_layers)]
        )  # type: List[DecoderLayer]
        # output projection shares its weight with the token embedding matrix
        self.output_projection = nn.Linear(
            self.embed_tokens.weight.shape[1],
            self.embed_tokens.weight.shape[0],
            bias=False,
        )
        self.output_projection.weight = self.embed_tokens.weight

    def forward(
        self,
        input_ids,
        encoder_hidden_states,
        encoder_padding_mask,
        decoder_padding_mask,
        decoder_causal_mask,
        past_key_values=None,
        use_cache=False,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """
        Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
        EMNLP 2019).
        Args:
            input_ids (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for teacher forcing
            encoder_hidden_states: output from the encoder, used for
                encoder-side attention
            encoder_padding_mask: for ignoring pad tokens
            past_key_values (dict or None): dictionary used for storing state during generation
        Returns:
            BaseModelOutputWithPast or tuple:
                - the decoder's features of shape `(batch, tgt_len, embed_dim)`
                - the cache
                - hidden states
                - attentions
        """
        # check attention mask and invert
        if encoder_padding_mask is not None:
            encoder_padding_mask = invert_mask(encoder_padding_mask)

        # embed positions
        positions = self.embed_positions(input_ids)  # , use_cache=use_cache)

        if use_cache:
            # incremental decoding: only the newest token is embedded and fed through
            input_ids = input_ids[:, -1:]
            positions = positions[:, -1:]  # happens after we embed them
            # assert input_ids.ne(self.padding_idx).any()

        x = self.embed_tokens(input_ids) * self.embed_scale
        x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)

        # Convert to FSMT internal format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attns = () if output_attentions else None
        next_decoder_cache = []
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                x = x.transpose(0, 1)
                all_hidden_states += (x,)
                x = x.transpose(0, 1)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                # NOTE(review): a dropped layer appends nothing to next_decoder_cache,
                # so cache indices can shift if layerdrop fires while use_cache is on
                continue

            layer_state = past_key_values[idx] if past_key_values is not None else None

            x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer(
                x,
                encoder_hidden_states,
                encoder_attn_mask=encoder_padding_mask,
                decoder_padding_mask=decoder_padding_mask,
                layer_state=layer_state,
                causal_mask=decoder_causal_mask,
                output_attentions=output_attentions,
            )

            if use_cache:
                next_decoder_cache.append(layer_past.copy())

            if output_attentions:
                all_self_attns += (layer_self_attn,)
                all_cross_attns += (layer_cross_attn,)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            x = x.transpose(0, 1)
            all_hidden_states += (x,)
            x = x.transpose(0, 1)

        # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
        x = x.transpose(0, 1)
        encoder_hidden_states = encoder_hidden_states.transpose(0, 1)

        # project back to the vocabulary via the tied embedding weight
        x = self.output_projection(x)

        next_cache = next_decoder_cache if use_cache else None

        if not return_dict:
            return tuple(
                v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=x,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attns,
        )
def _reorder_buffer(attn_cache, new_order):
for k, input_buffer_k in attn_cache.items():
if input_buffer_k is not None:
attn_cache[k] = input_buffer_k.index_select(0, new_order)
return attn_cache
class Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim,
        num_heads,
        dropout=0.0,
        bias=True,
        encoder_decoder_attention=False,  # otherwise self_attention
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        self.encoder_decoder_attention = encoder_decoder_attention
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        # slot name under which this module stores its k/v cache in layer_state
        self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"

    def _shape(self, tensor, seq_len, bsz):
        # (seq_len, bsz, embed_dim) -> (bsz * num_heads, seq_len, head_dim)
        return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)

    def forward(
        self,
        query,
        key: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
        attn_mask: Optional[Tensor] = None,
        output_attentions=False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time(SeqLen) x Batch x Channel"""
        # static_kv: cross-attention keys/values never change across decode steps
        static_kv: bool = self.encoder_decoder_attention
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # get here for encoder decoder cause of static_kv
        if layer_state is not None:  # reuse k,v and encoder_padding_mask
            saved_state = layer_state.get(self.cache_key, {})
            if "prev_key" in saved_state and static_kv:
                # previous time steps are cached - no need to recompute key and value if they are static
                key = None
        else:
            saved_state = None
            layer_state = {}

        q = self.q_proj(query) * self.scaling
        if static_kv:
            if key is None:
                # cached cross-attn k/v; restored below in _use_saved_state
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            k = self.k_proj(query)
            v = self.v_proj(query)

        q = self._shape(q, tgt_len, bsz)
        if k is not None:
            k = self._shape(k, -1, bsz)
        if v is not None:
            v = self._shape(v, -1, bsz)

        if saved_state is not None:
            k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)

        # Update cache
        layer_state[self.cache_key] = {
            "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
            "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
            "prev_key_padding_mask": key_padding_mask if not static_kv else None,
        }

        assert k is not None
        src_len = k.size(1)
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)

        if attn_mask is not None:
            # additive mask (e.g. causal mask), broadcast over heads
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        assert key_padding_mask is None or key_padding_mask.size()[:2] == (
            bsz,
            src_len,
        )

        if key_padding_mask is not None:  # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
            attn_weights = attn_weights.masked_fill(reshaped, float("-inf"))
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = F.softmax(attn_weights, dim=-1)

        if output_attentions:
            # make sure that attn_weights are included in graph
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = F.dropout(
            attn_weights,
            p=self.dropout,
            training=self.training,
        )

        assert v is not None
        attn_output = torch.bmm(attn_probs, v)
        assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped

    def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
        # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
        if "prev_key" in saved_state:
            _prev_key = saved_state["prev_key"]
            assert _prev_key is not None
            prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
            if static_kv:
                # cross-attn: cached keys are reused verbatim
                k = prev_key
            else:
                # self-attn: append the new step's keys to the cache
                assert k is not None
                k = torch.cat([prev_key, k], dim=1)
        if "prev_value" in saved_state:
            _prev_value = saved_state["prev_value"]
            assert _prev_value is not None
            prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
            if static_kv:
                v = prev_value
            else:
                assert v is not None
                v = torch.cat([prev_value, v], dim=1)
        assert k is not None and v is not None
        prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None)
        if prev_key_padding_mask is not None:
            if static_kv:
                new_key_padding_mask = prev_key_padding_mask
            else:
                new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
        else:
            new_key_padding_mask = key_padding_mask
        return k, v, new_key_padding_mask
def fill_with_neg_inf(t):
    """Fill ``t`` with ``-inf`` in an fp16-compatible way (fill as float, cast back)."""
    filled = t.float().fill_(float("-inf"))
    return filled.type_as(t)
# Public API
def _get_shape(t):
return getattr(t, "shape", None)
@add_start_docstrings(
    "The bare FSMT Model outputting raw hidden-states without any specific head on top.",
    FSMT_START_DOCSTRING,
)
class FSMTModel(PretrainedFSMTModel):
    """Encoder-decoder FSMT model with separate source/target vocabularies."""

    def __init__(self, config: FSMTConfig):
        super().__init__(config)

        padding_idx = config.pad_token_id
        # separate embedding tables: FSMT uses distinct source and target vocabs
        encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx)
        decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx)

        self.encoder = FSMTEncoder(config, encoder_embed_tokens)
        self.decoder = FSMTDecoder(config, decoder_embed_tokens)

        self.init_weights()

    @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="facebook/wmt19-ru-en",
        output_type=Seq2SeqModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs: Optional[Tuple] = None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # without decoder inputs there is nothing to cache
        if decoder_input_ids is None:
            use_cache = False

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # make masks if user doesn't supply
        if not use_cache:
            decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs(
                self.config,
                input_ids,
                decoder_input_ids=decoder_input_ids,
                decoder_padding_mask=decoder_attention_mask,
                causal_mask_dtype=self.decoder.embed_tokens.weight.dtype,
            )
        else:
            # during cached generation only the newest token is decoded; no masks needed
            decoder_padding_mask, causal_mask = None, None

        assert decoder_input_ids is not None

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        # positional args: input_ids, encoder_hidden_states, encoder_padding_mask, decoder_padding_mask
        decoder_outputs = self.decoder(
            decoder_input_ids,
            encoder_outputs[0],
            attention_mask,
            decoder_padding_mask,
            decoder_causal_mask=causal_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs
        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    def get_input_embeddings(self):
        return self.encoder.embed_tokens

    def set_input_embeddings(self, value):
        self.encoder.embed_tokens = value

    def get_output_embeddings(self):
        # NOTE(review): returns the decoder *input* embedding table, which the
        # decoder ties to its output projection weight
        return self.decoder.embed_tokens

    def set_output_embeddings(self, value):
        self.decoder.embed_tokens = value
@add_start_docstrings(
    "The FSMT Model with a language modeling head. Can be used for summarization.", FSMT_START_DOCSTRING
)
class FSMTForConditionalGeneration(PretrainedFSMTModel):
    base_model_prefix = "model"
    # positional embeddings are deterministic sinusoids, so they are neither
    # loaded from nor written to checkpoints
    _keys_to_ignore_on_load_missing = [
        "model.encoder.embed_positions.weight",
        "model.decoder.embed_positions.weight",
    ]
    _keys_to_ignore_on_save = [
        "model.encoder.embed_positions.weight",
        "model.decoder.embed_positions.weight",
    ]

    def __init__(self, config: FSMTConfig):
        super().__init__(config)
        base_model = FSMTModel(config)
        self.model = base_model

    def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        """Unsupported: FSMT has two separate vocabularies; always raises."""
        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        self.model.encoder.embed_tokens = new_embeddings

        new_embeddings = super().resize_token_embeddings(new_num_tokens)
        self.model.decoder.embed_tokens = new_embeddings

        # XXX: this is not quite correct, as we have 2 different `new_embeddings`, and
        # only one return value is expected. Needs to be redesigned in the core to support dual dicts
        raise NotImplementedError("this method needs re-thinking for models with 2 separate dictionaries")

        return new_embeddings

    @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(FSMT_GENERATION_EXAMPLE)
    def forward(
        self,
        input_ids,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
            config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            # training with labels: caching is meaningless (full teacher forcing)
            use_cache = False

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # the decoder already projects to the target vocabulary
        lm_logits = outputs[0]

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # TODO(SS): do we need to ignore pad tokens in labels?
            masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return Seq2SeqLMOutput(
            loss=masked_lm_loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
    ):
        """Assemble the kwargs the generation loop feeds back into ``forward``."""
        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    def adjust_logits_during_generation(self, logits, cur_len, max_length):
        """Force EOS on the last allowed generation step."""
        if cur_len == max_length - 1 and self.config.eos_token_id is not None:
            self._force_token_ids_generation(logits, self.config.eos_token_id)
        return logits

    def _force_token_ids_generation(self, scores, token_ids) -> None:
        """force one of token_ids to be generated by setting prob of all other tokens to 0"""
        if isinstance(token_ids, int):
            token_ids = [token_ids]
        # every target-vocab id that is NOT in token_ids gets -inf
        all_but_token_ids_mask = torch.tensor(
            [x for x in range(self.config.tgt_vocab_size) if x not in token_ids],
            dtype=torch.long,
            device=next(self.parameters()).device,
        )
        assert len(scores.shape) == 2, "scores should be of rank 2 with shape: [batch_size, vocab_size]"
        scores[:, all_but_token_ids_mask] = -float("inf")

    @staticmethod
    def _reorder_cache(past, beam_idx):
        # reorder each layer's cached k/v buffers to follow the surviving beams
        reordered_past = []
        for layer_past in past:
            # get the correct batch idx from decoder layer's batch dim for cross and self-attn
            layer_past_new = {
                attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
            }
            reordered_past.append(layer_past_new)
        return reordered_past

    def get_encoder(self):
        return self.model.encoder

    def get_output_embeddings(self):
        return self.model.decoder.embed_tokens
class SinusoidalPositionalEmbedding(nn.Embedding):
    """
    This module produces sinusoidal positional embeddings of any length.
    We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge.
    Padding symbols are ignored.
    These embeddings get automatically extended in forward if more positions is needed.
    """

    def __init__(self, num_positions, embedding_dim, padding_idx):
        # NOTE: super().__init__ is invoked indirectly — make_weight calls it
        # on the first invocation (when self.weight does not exist yet)
        self.make_weight(num_positions, embedding_dim, padding_idx)

    def make_weight(self, num_positions, embedding_dim, padding_idx):
        """Build (or rebuild, when extending) the fixed sinusoidal weight table."""
        weight = self.get_embedding(num_positions, embedding_dim, padding_idx)
        if not hasattr(self, "weight"):
            # in ___init__
            super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight)
        else:
            # in forward: replace the table in place, keeping it non-trainable
            weight = weight.to(self.weight.device)
            self.weight = nn.Parameter(weight)
            self.weight.detach_()
            self.weight.requires_grad = False

    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx):
        """
        Build sinusoidal embeddings.
        This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
        "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb

    @staticmethod
    def make_positions(tensor, padding_idx: int):
        """
        Replace non-padding symbols with their position numbers.
        Position numbers begin at padding_idx+1. Padding symbols are ignored.
        """
        # The series of casts and type-conversions here are carefully
        # balanced to both work with ONNX export and XLA. In particular XLA
        # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
        # how to handle the dtype kwarg in cumsum.
        mask = tensor.ne(padding_idx).int()
        return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx

    def forward(
        self,
        input,
        incremental_state: Optional[Any] = None,
        timestep: Optional[Tensor] = None,
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.shape[:2]
        max_pos = self.padding_idx + 1 + seq_len
        if max_pos > self.weight.size(0):
            # expand embeddings if needed
            self.make_weight(max_pos, self.embedding_dim, self.padding_idx)
        positions = self.make_positions(input, self.padding_idx)
        return super().forward(positions)
| 40.020276
| 270
| 0.662945
|
4fe288a2b33119daeb37e6d4cc82e26f5de8706f
| 43,604
|
py
|
Python
|
xgboost_ray/tests/test_sklearn.py
|
edoakes/xgboost_ray
|
1e16381f42c523caa854bd625ddb969553fee87d
|
[
"Apache-2.0"
] | 80
|
2020-10-13T10:10:05.000Z
|
2022-03-25T12:16:47.000Z
|
xgboost_ray/tests/test_sklearn.py
|
edoakes/xgboost_ray
|
1e16381f42c523caa854bd625ddb969553fee87d
|
[
"Apache-2.0"
] | 104
|
2020-11-07T11:06:29.000Z
|
2022-03-29T07:48:21.000Z
|
xgboost_ray/tests/test_sklearn.py
|
edoakes/xgboost_ray
|
1e16381f42c523caa854bd625ddb969553fee87d
|
[
"Apache-2.0"
] | 22
|
2020-10-13T10:10:07.000Z
|
2022-02-20T08:51:58.000Z
|
"""Copied almost verbatim from https://github.com/dmlc/xgboost/blob/a5c852660b1056204aa2e0cbfcd5b4ecfbf31adf/tests/python/test_with_sklearn.py
in order to ensure 1:1 coverage, with minimal modifications.
Some tests were disabled due to not being applicable for a
distributed setting.""" # noqa: E501
# Copyright 2021 by XGBoost Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File based on:
# https://github.com/dmlc/xgboost/blob/a5c852660b1056204aa2e0cbfcd5b4ecfbf31adf/tests/python/test_with_sklearn.py
# License:
# https://github.com/dmlc/xgboost/blob/a5c852660b1056204aa2e0cbfcd5b4ecfbf31adf/LICENSE
# import collections
# import importlib.util
import numpy as np
import xgboost as xgb
import unittest
# import io
# from contextlib import redirect_stdout, redirect_stderr
import tempfile
import os
import shutil
import json
import ray
from xgboost_ray.sklearn import (RayXGBClassifier, RayXGBRegressor,
RayXGBRFClassifier, RayXGBRFRegressor,
RayXGBRanker)
from xgboost_ray.main import XGBOOST_VERSION_TUPLE
from xgboost_ray.matrix import RayShardingMode
def softmax(x):
    """Numerically stable softmax over all elements of ``x``.

    Subtracting the max before exponentiating prevents overflow (and the
    resulting NaNs) for large logits while leaving the result mathematically
    unchanged.
    """
    e = np.exp(x - np.max(x))
    return e / np.sum(e)
def softprob_obj(classes):
    """Return a custom multi-class softprob objective for ``classes`` classes.

    The returned callable computes per-row gradients and Hessians of the
    softmax cross-entropy loss, flattened to shape (rows * classes, 1) as
    expected by xgboost's custom-objective interface.
    """

    def objective(labels, predt):
        """Loss objective for multi-class softmax"""
        rows = labels.shape[0]
        grad = np.zeros((rows, classes), dtype=float)
        hess = np.zeros((rows, classes), dtype=float)
        eps = 1e-6
        for r in range(predt.shape[0]):
            target = labels[r]
            p = softmax(predt[r, :])
            for c in range(predt.shape[1]):
                # Bug fix: the original `target >= 0 or target <= classes` was
                # always true; validate the label range with a conjunction.
                assert 0 <= target < classes
                g = p[c] - 1.0 if c == target else p[c]
                h = max((2.0 * p[c] * (1.0 - p[c])).item(), eps)
                grad[r, c] = g
                hess[r, c] = h
        grad = grad.reshape((rows * classes, 1))
        hess = hess.reshape((rows * classes, 1))
        return grad, hess

    return objective
class TemporaryDirectory(object):
    """Context manager wrapping ``tempfile.mkdtemp()``.

    On entry a fresh temporary directory is created and its path returned;
    on exit the whole directory tree is removed.
    """

    def __enter__(self):
        path = tempfile.mkdtemp()
        self.name = path
        return path

    def __exit__(self, exc_type, exc_value, traceback):
        shutil.rmtree(self.name)
class XGBoostRaySklearnTest(unittest.TestCase):
def setUp(self):
self.seed = 1994
self.rng = np.random.RandomState(self.seed)
    def tearDown(self) -> None:
        # shut Ray down after each test so cluster state does not leak between tests
        if ray.is_initialized():
            ray.shutdown()
    def _init_ray(self):
        # start a local 4-CPU Ray instance unless one is already running
        if not ray.is_initialized():
            ray.init(num_cpus=4)
    def run_binary_classification(self, cls, ray_dmatrix_params=None):
        """Fit ``cls`` on 2-class digits with 2-fold CV and assert error < 0.1.

        Args:
            cls: Ray sklearn-style estimator class (e.g. RayXGBClassifier).
            ray_dmatrix_params: optional params forwarded to the RayDMatrix
                in both fit and predict.
        """
        self._init_ray()

        from sklearn.datasets import load_digits
        from sklearn.model_selection import KFold

        digits = load_digits(n_class=2)
        y = digits["target"]
        X = digits["data"]
        kf = KFold(n_splits=2, shuffle=True, random_state=self.rng)
        for train_index, test_index in kf.split(X, y):
            clf = cls(random_state=42)
            xgb_model = clf.fit(
                X[train_index],
                y[train_index],
                eval_metric=["auc", "logloss"],
                ray_dmatrix_params=ray_dmatrix_params,
            )
            preds = xgb_model.predict(
                X[test_index], ray_dmatrix_params=ray_dmatrix_params)
            labels = y[test_index]
            # fraction of thresholded (p > 0.5) predictions disagreeing with labels
            err = sum(1 for i in range(len(preds))
                      if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
            assert err < 0.1
    def test_binary_classification(self):
        """Binary classification with the default RayDMatrix sharding."""
        self.run_binary_classification(RayXGBClassifier)
    def test_binary_classification_dmatrix_params(self):
        """Binary classification using BATCH sharding for the RayDMatrix."""
        self.run_binary_classification(
            RayXGBClassifier,
            ray_dmatrix_params={"sharding": RayShardingMode.BATCH})
    # ray: added for legacy CI test
    @unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 0, 0),
                     f"not supported in xgb version {xgb.__version__}")
    def test_binary_rf_classification(self):
        """Binary classification with the random-forest variant (requires xgb >= 1.0)."""
        self.run_binary_classification(RayXGBRFClassifier)
    def test_multiclass_classification(self):
        """3-class iris classification: checks predict with/without margin and
        ntree_limit, predict_proba shapes, and a custom softprob objective."""
        self._init_ray()

        from sklearn.datasets import load_iris
        from sklearn.model_selection import KFold

        def check_pred(preds, labels, output_margin):
            # with output_margin the predictions are per-class scores: argmax first
            if output_margin:
                err = sum(1 for i in range(len(preds))
                          if preds[i].argmax() != labels[i]) / float(
                              len(preds))
            else:
                err = sum(1 for i in range(len(preds))
                          if preds[i] != labels[i]) / float(len(preds))
            assert err < 0.4

        iris = load_iris()
        y = iris["target"]
        X = iris["data"]
        kf = KFold(n_splits=2, shuffle=True, random_state=self.rng)
        for train_index, test_index in kf.split(X, y):
            xgb_model = RayXGBClassifier().fit(X[train_index], y[train_index])
            if hasattr(xgb_model.get_booster(), "num_boosted_rounds"):
                assert (xgb_model.get_booster().num_boosted_rounds() ==
                        xgb_model.n_estimators)
            preds = xgb_model.predict(X[test_index])
            # test other params in XGBClassifier().fit
            preds2 = xgb_model.predict(
                X[test_index], output_margin=True, ntree_limit=3)
            preds3 = xgb_model.predict(
                X[test_index], output_margin=True, ntree_limit=0)
            preds4 = xgb_model.predict(
                X[test_index], output_margin=False, ntree_limit=3)
            labels = y[test_index]

            check_pred(preds, labels, output_margin=False)
            check_pred(preds2, labels, output_margin=True)
            check_pred(preds3, labels, output_margin=True)
            check_pred(preds4, labels, output_margin=False)

        cls = RayXGBClassifier(n_estimators=4).fit(X, y)
        assert cls.n_classes_ == 3
        proba = cls.predict_proba(X)
        assert proba.shape[0] == X.shape[0]
        assert proba.shape[1] == cls.n_classes_

        # custom objective, the default is multi:softprob
        # so no transformation is required.
        cls = RayXGBClassifier(
            n_estimators=4, objective=softprob_obj(3)).fit(X, y)
        proba = cls.predict_proba(X)
        assert proba.shape[0] == X.shape[0]
        assert proba.shape[1] == cls.n_classes_
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 4, 0),
f"not supported in xgb version {xgb.__version__}")
def test_best_ntree_limit(self):
self._init_ray()
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
def train(booster, forest):
rounds = 4
cls = RayXGBClassifier(
n_estimators=rounds, num_parallel_tree=forest,
booster=booster).fit(
X, y, eval_set=[(X, y)], early_stopping_rounds=3)
if forest:
assert cls.best_ntree_limit == rounds * forest
else:
assert cls.best_ntree_limit == 0
# best_ntree_limit is used by default,
# assert that under gblinear it's
# automatically ignored due to being 0.
cls.predict(X)
num_parallel_tree = 4
train("gbtree", num_parallel_tree)
train("dart", num_parallel_tree)
train("gblinear", None)
def test_stacking_regression(self):
self._init_ray()
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_diabetes
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import StackingRegressor
X, y = load_diabetes(return_X_y=True)
estimators = [
("gbm", RayXGBRegressor(objective="reg:squarederror")),
("lr", RidgeCV()),
]
reg = StackingRegressor(
estimators=estimators,
final_estimator=RandomForestRegressor(
n_estimators=10, random_state=42),
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42)
reg.fit(X_train, y_train).score(X_test, y_test)
def test_stacking_classification(self):
self._init_ray()
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
X, y = load_iris(return_X_y=True)
estimators = [
("gbm", RayXGBClassifier()),
(
"svr",
make_pipeline(StandardScaler(), LinearSVC(random_state=42)),
),
]
clf = StackingClassifier(
estimators=estimators, final_estimator=LogisticRegression())
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42)
clf.fit(X_train, y_train).score(X_test, y_test)
# exact tree method doesn't support distributed training
# def test_feature_importances_weight(self)
# def test_feature_importances_gain(self)
def test_select_feature(self):
self._init_ray()
from sklearn.datasets import load_digits
from sklearn.feature_selection import SelectFromModel
digits = load_digits(n_class=2)
y = digits["target"]
X = digits["data"]
cls = RayXGBClassifier()
cls.fit(X, y)
selector = SelectFromModel(cls, prefit=True, max_features=1)
X_selected = selector.transform(X)
assert X_selected.shape[1] == 1
def test_num_parallel_tree(self):
self._init_ray()
from sklearn.datasets import load_boston
reg = RayXGBRegressor(
n_estimators=4, num_parallel_tree=4, tree_method="hist")
boston = load_boston()
bst = reg.fit(X=boston["data"], y=boston["target"])
dump = bst.get_booster().get_dump(dump_format="json")
assert len(dump) == 16
if xgb.__version__ != "0.90":
reg = RayXGBRFRegressor(n_estimators=4)
bst = reg.fit(X=boston["data"], y=boston["target"])
dump = bst.get_booster().get_dump(dump_format="json")
assert len(dump) == 4
config = json.loads(bst.get_booster().save_config())
assert (int(config["learner"]["gradient_booster"][
"gbtree_train_param"]["num_parallel_tree"]) == 4)
def test_boston_housing_regression(self):
self._init_ray()
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.model_selection import KFold
boston = load_boston()
y = boston["target"]
X = boston["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=self.rng)
for train_index, test_index in kf.split(X, y):
xgb_model = RayXGBRegressor().fit(X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
# test other params in XGBRegressor().fit
preds2 = xgb_model.predict(
X[test_index], output_margin=True, ntree_limit=3)
preds3 = xgb_model.predict(
X[test_index], output_margin=True, ntree_limit=0)
preds4 = xgb_model.predict(
X[test_index], output_margin=False, ntree_limit=3)
labels = y[test_index]
assert mean_squared_error(preds, labels) < 25
assert mean_squared_error(preds2, labels) < 350
assert mean_squared_error(preds3, labels) < 25
assert mean_squared_error(preds4, labels) < 350
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 0, 0),
f"not supported in xgb version {xgb.__version__}")
def run_boston_housing_rf_regression(self, tree_method):
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.model_selection import KFold
X, y = load_boston(return_X_y=True)
kf = KFold(n_splits=2, shuffle=True, random_state=self.rng)
for train_index, test_index in kf.split(X, y):
xgb_model = RayXGBRFRegressor(
random_state=42, tree_method=tree_method).fit(
X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
labels = y[test_index]
assert mean_squared_error(preds, labels) < 35
def test_boston_housing_rf_regression(self):
self._init_ray()
self.run_boston_housing_rf_regression("hist")
def test_parameter_tuning(self):
self._init_ray()
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_boston
boston = load_boston()
y = boston["target"]
X = boston["data"]
xgb_model = RayXGBRegressor(learning_rate=0.1)
clf = GridSearchCV(
xgb_model,
{
"max_depth": [2, 4, 6],
"n_estimators": [50, 100, 200]
},
cv=3,
verbose=1,
)
clf.fit(X, y)
assert clf.best_score_ < 0.7
assert clf.best_params_ == {"n_estimators": 100, "max_depth": 4}
def test_regression_with_custom_objective(self):
self._init_ray()
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.model_selection import KFold
def objective_ls(y_true, y_pred):
grad = y_pred - y_true
hess = np.ones(len(y_true))
return grad, hess
boston = load_boston()
y = boston["target"]
X = boston["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=self.rng)
for train_index, test_index in kf.split(X, y):
xgb_model = RayXGBRegressor(objective=objective_ls).fit(
X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
labels = y[test_index]
assert mean_squared_error(preds, labels) < 25
# Test that the custom objective function is actually used
class XGBCustomObjectiveException(Exception):
pass
def dummy_objective(y_true, y_pred):
raise XGBCustomObjectiveException()
xgb_model = RayXGBRegressor(objective=dummy_objective)
# TODO figure out how to assertRaises XGBCustomObjectiveException
with self.assertRaises(RuntimeError):
xgb_model.fit(X, y)
def test_classification_with_custom_objective(self):
self._init_ray()
from sklearn.datasets import load_digits
from sklearn.model_selection import KFold
def logregobj(y_true, y_pred):
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
grad = y_pred - y_true
hess = y_pred * (1.0 - y_pred)
return grad, hess
digits = load_digits(n_class=2)
y = digits["target"]
X = digits["data"]
kf = KFold(n_splits=2, shuffle=True, random_state=self.rng)
for train_index, test_index in kf.split(X, y):
xgb_model = RayXGBClassifier(objective=logregobj)
xgb_model.fit(X[train_index], y[train_index])
preds = xgb_model.predict(X[test_index])
labels = y[test_index]
err = sum(1 for i in range(len(preds))
if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
assert err < 0.1
# Test that the custom objective function is actually used
class XGBCustomObjectiveException(Exception):
pass
def dummy_objective(y_true, y_preds):
raise XGBCustomObjectiveException()
xgb_model = RayXGBClassifier(objective=dummy_objective)
# TODO figure out how to assertRaises XGBCustomObjectiveException
with self.assertRaises(RuntimeError):
xgb_model.fit(X, y)
# cls = RayXGBClassifier(use_label_encoder=False)
# cls.fit(X, y)
# is_called = [False]
# def wrapped(y, p):
# is_called[0] = True
# return logregobj(y, p)
# cls.set_params(objective=wrapped)
# cls.predict(X) # no throw
# cls.fit(X, y)
# assert is_called[0]
def test_sklearn_api(self):
self._init_ray()
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
iris = load_iris()
tr_d, te_d, tr_l, te_l = train_test_split(
iris.data, iris.target, train_size=120, test_size=0.2)
classifier = RayXGBClassifier(
booster="gbtree", n_estimators=10, random_state=self.seed)
classifier.fit(tr_d, tr_l)
preds = classifier.predict(te_d)
labels = te_l
err = (sum([1 for p, l in zip(preds, labels)
if p != l]) * 1.0 / len(te_l))
assert err < 0.2
def test_sklearn_api_gblinear(self):
self._init_ray()
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
iris = load_iris()
tr_d, te_d, tr_l, te_l = train_test_split(
iris.data, iris.target, train_size=120)
classifier = RayXGBClassifier(
booster="gblinear", n_estimators=100, random_state=self.seed)
classifier.fit(tr_d, tr_l)
preds = classifier.predict(te_d)
labels = te_l
err = (sum([1 for p, l in zip(preds, labels)
if p != l]) * 1.0 / len(te_l))
assert err < 0.5
    @unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 0, 0),
                     f"not supported in xgb version {xgb.__version__}")
    def test_sklearn_random_state(self):
        """``random_state`` ints are stored verbatim; RandomState becomes int."""
        self._init_ray()
        clf = RayXGBClassifier(random_state=402)
        assert clf.get_xgb_params()["random_state"] == 402
        clf = RayXGBClassifier(random_state=401)
        assert clf.get_xgb_params()["random_state"] == 401
        random_state = np.random.RandomState(seed=403)
        clf = RayXGBClassifier(random_state=random_state)
        # A numpy RandomState must be coerced to a plain int seed.
        assert isinstance(clf.get_xgb_params()["random_state"], int)
    @unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 0, 0),
                     f"not supported in xgb version {xgb.__version__}")
    def test_sklearn_n_jobs(self):
        """``n_jobs`` passes straight through to the underlying xgb params."""
        self._init_ray()
        clf = RayXGBClassifier(n_jobs=1)
        assert clf.get_xgb_params()["n_jobs"] == 1
        clf = RayXGBClassifier(n_jobs=2)
        assert clf.get_xgb_params()["n_jobs"] == 2
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 3, 0),
f"not supported in xgb version {xgb.__version__}")
def test_parameters_access(self):
self._init_ray()
from sklearn import datasets
params = {"updater": "grow_gpu_hist", "subsample": 0.5, "n_jobs": -1}
clf = RayXGBClassifier(n_estimators=1000, **params)
assert clf.get_params()["updater"] == "grow_gpu_hist"
assert clf.get_params()["subsample"] == 0.5
assert clf.get_params()["n_estimators"] == 1000
clf = RayXGBClassifier(n_estimators=1, nthread=4)
X, y = datasets.load_iris(return_X_y=True)
clf.fit(X, y)
config = json.loads(clf.get_booster().save_config())
assert int(config["learner"]["generic_param"]["nthread"]) == 4
clf.set_params(nthread=16)
config = json.loads(clf.get_booster().save_config())
assert int(config["learner"]["generic_param"]["nthread"]) == 16
clf.predict(X)
config = json.loads(clf.get_booster().save_config())
assert int(config["learner"]["generic_param"]["nthread"]) == 16
    def test_kwargs_error(self):
        """Passing ``n_jobs`` both explicitly and via ``**params`` raises."""
        self._init_ray()
        params = {"updater": "grow_gpu_hist", "subsample": 0.5, "n_jobs": -1}
        with self.assertRaises(TypeError):
            # Duplicate keyword argument: n_jobs appears twice.
            clf = RayXGBClassifier(n_jobs=1000, **params)
            assert isinstance(clf, RayXGBClassifier)
def test_kwargs_grid_search(self):
self._init_ray()
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
params = {"tree_method": "hist"}
clf = RayXGBClassifier(n_estimators=1, learning_rate=1.0, **params)
assert clf.get_params()["tree_method"] == "hist"
# 'max_leaves' is not a default argument of XGBClassifier
# Check we can still do grid search over this parameter
search_params = {"max_leaves": range(2, 5)}
grid_cv = GridSearchCV(clf, search_params, cv=5)
iris = datasets.load_iris()
grid_cv.fit(iris.data, iris.target)
# Expect unique results for each parameter value
# This confirms sklearn is able to successfully update the parameter
means = grid_cv.cv_results_["mean_test_score"]
assert len(means) == len(set(means))
    def test_sklearn_clone(self):
        """``sklearn.base.clone`` must work after mutating a public attribute."""
        self._init_ray()
        from sklearn.base import clone
        clf = RayXGBClassifier(n_jobs=2)
        clf.n_jobs = -1
        clone(clf)
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 0, 0),
f"not supported in xgb version {xgb.__version__}")
def test_sklearn_get_default_params(self):
self._init_ray()
from sklearn.datasets import load_digits
digits_2class = load_digits(n_class=2)
X = digits_2class["data"]
y = digits_2class["target"]
cls = RayXGBClassifier()
assert cls.get_params()["base_score"] is None
cls.fit(X[:4, ...], y[:4, ...])
assert cls.get_params()["base_score"] is not None
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 1, 0),
f"not supported in xgb version {xgb.__version__}")
def test_validation_weights_xgbmodel(self):
self._init_ray()
from sklearn.datasets import make_hastie_10_2
# prepare training and test data
X, y = make_hastie_10_2(n_samples=2000, random_state=42)
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:1600], X[1600:]
y_train, y_test = y[:1600], y[1600:]
# instantiate model
param_dist = {
"objective": "binary:logistic",
"n_estimators": 2,
"random_state": 123,
}
clf = xgb.sklearn.XGBModel(**param_dist)
# train it using instance weights only in the training set
weights_train = np.random.choice([1, 2], len(X_train))
clf.fit(
X_train,
y_train,
sample_weight=weights_train,
eval_set=[(X_test, y_test)],
eval_metric="logloss",
verbose=False,
)
# evaluate logloss metric on test set *without* using weights
evals_result_without_weights = clf.evals_result()
logloss_without_weights = evals_result_without_weights["validation_0"][
"logloss"]
# now use weights for the test set
np.random.seed(0)
weights_test = np.random.choice([1, 2], len(X_test))
clf.fit(
X_train,
y_train,
sample_weight=weights_train,
eval_set=[(X_test, y_test)],
sample_weight_eval_set=[weights_test],
eval_metric="logloss",
verbose=False,
)
evals_result_with_weights = clf.evals_result()
logloss_with_weights = evals_result_with_weights["validation_0"][
"logloss"]
# check that the logloss in the test set is actually different
# when using weights than when not using them
assert all((logloss_with_weights[i] != logloss_without_weights[i]
for i in [0, 1]))
with self.assertRaises((ValueError, AssertionError)):
# length of eval set and sample weight doesn't match.
clf.fit(
X_train,
y_train,
sample_weight=weights_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
sample_weight_eval_set=[weights_train],
)
with self.assertRaises((ValueError, AssertionError)):
cls = RayXGBClassifier()
cls.fit(
X_train,
y_train,
sample_weight=weights_train,
eval_set=[(X_train, y_train), (X_test, y_test)],
sample_weight_eval_set=[weights_train],
)
def test_validation_weights_xgbclassifier(self):
self._init_ray()
from sklearn.datasets import make_hastie_10_2
# prepare training and test data
X, y = make_hastie_10_2(n_samples=2000, random_state=42)
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:1600], X[1600:]
y_train, y_test = y[:1600], y[1600:]
# instantiate model
param_dist = {
"objective": "binary:logistic",
"n_estimators": 2,
"random_state": 123,
}
clf = RayXGBClassifier(**param_dist)
# train it using instance weights only in the training set
weights_train = np.random.choice([1, 2], len(X_train))
clf.fit(
X_train,
y_train,
sample_weight=weights_train,
eval_set=[(X_test, y_test)],
eval_metric="logloss",
verbose=False,
)
# evaluate logloss metric on test set *without* using weights
evals_result_without_weights = clf.evals_result()
logloss_without_weights = evals_result_without_weights["validation_0"][
"logloss"]
# now use weights for the test set
np.random.seed(0)
weights_test = np.random.choice([1, 2], len(X_test))
clf.fit(
X_train,
y_train,
sample_weight=weights_train,
eval_set=[(X_test, y_test)],
sample_weight_eval_set=[weights_test],
eval_metric="logloss",
verbose=False,
)
evals_result_with_weights = clf.evals_result()
logloss_with_weights = evals_result_with_weights["validation_0"][
"logloss"]
# check that the logloss in the test set is actually different
# when using weights than when not using them
assert all((logloss_with_weights[i] != logloss_without_weights[i]
for i in [0, 1]))
    def save_load_model(self, model_path):
        """Round-trip a trained classifier through ``model_path``.

        Verifies that sklearn-side metadata (classes_, use_label_encoder,
        booster) survives save/load, that the reloaded model still predicts
        well, and that a native ``xgb.Booster`` loaded from the same file
        produces identical margins.
        """
        from sklearn.datasets import load_digits
        from sklearn.model_selection import KFold
        digits = load_digits(n_class=2)
        y = digits["target"]
        X = digits["data"]
        kf = KFold(n_splits=2, shuffle=True, random_state=self.rng)
        for train_index, test_index in kf.split(X, y):
            xgb_model = RayXGBClassifier(use_label_encoder=False).fit(
                X[train_index], y[train_index])
            xgb_model.save_model(model_path)
            # Reload into a fresh estimator and check restored attributes.
            xgb_model = RayXGBClassifier()
            xgb_model.load_model(model_path)
            assert xgb_model.use_label_encoder is False
            assert isinstance(xgb_model.classes_, np.ndarray)
            assert isinstance(xgb_model._Booster, xgb.Booster)
            preds = xgb_model.predict(X[test_index])
            labels = y[test_index]
            err = sum(1 for i in range(len(preds))
                      if int(preds[i] > 0.5) != labels[i]) / float(len(preds))
            assert err < 0.1
            assert xgb_model.get_booster().attr("scikit_learn") is None
            # test native booster
            preds = xgb_model.predict(X[test_index], output_margin=True)
            booster = xgb.Booster(model_file=model_path)
            predt_1 = booster.predict(
                xgb.DMatrix(X[test_index]), output_margin=True)
            assert np.allclose(preds, predt_1)
        # Loading a sklearn-saved checkpoint into a bare XGBModel must fail.
        with self.assertRaises(TypeError):
            xgb_model = xgb.XGBModel()
            xgb_model.load_model(model_path)
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 3, 0),
f"not supported in xgb version {xgb.__version__}")
def test_save_load_model(self):
self._init_ray()
with TemporaryDirectory() as tempdir:
model_path = os.path.join(tempdir, "digits.model")
self.save_load_model(model_path)
with TemporaryDirectory() as tempdir:
model_path = os.path.join(tempdir, "digits.model.json")
self.save_load_model(model_path)
from sklearn.datasets import load_digits
with TemporaryDirectory() as tempdir:
model_path = os.path.join(tempdir, "digits.model.json")
digits = load_digits(n_class=2)
y = digits["target"]
X = digits["data"]
booster = xgb.train(
{
"tree_method": "hist",
"objective": "binary:logistic"
},
dtrain=xgb.DMatrix(X, y),
num_boost_round=4,
)
predt_0 = booster.predict(xgb.DMatrix(X))
booster.save_model(model_path)
cls = RayXGBClassifier()
cls.load_model(model_path)
proba = cls.predict_proba(X)
assert proba.shape[0] == X.shape[0]
assert proba.shape[1] == 2 # binary
predt_1 = cls.predict_proba(X)[:, 1]
assert np.allclose(predt_0, predt_1)
cls = xgb.XGBModel()
cls.load_model(model_path)
predt_1 = cls.predict(X)
assert np.allclose(predt_0, predt_1)
# # forcing it to be last as it's the longest test by far
# def test_zzzzzzz_RFECV(self):
# self._init_ray()
# from sklearn.datasets import load_boston
# from sklearn.datasets import load_breast_cancer
# from sklearn.datasets import load_iris
# from sklearn.feature_selection import RFECV
# # Regression
# X, y = load_boston(return_X_y=True)
# bst = RayXGBRegressor(
# booster="gblinear",
# learning_rate=0.1,
# n_estimators=10,
# objective="reg:squarederror",
# random_state=0,
# verbosity=0,
# )
# rfecv = RFECV(
# estimator=bst, step=1, cv=3, scoring="neg_mean_squared_error")
# rfecv.fit(X, y)
# # Binary classification
# X, y = load_breast_cancer(return_X_y=True)
# bst = RayXGBClassifier(
# booster="gblinear",
# learning_rate=0.1,
# n_estimators=10,
# objective="binary:logistic",
# random_state=0,
# verbosity=0,
# use_label_encoder=False,
# )
# rfecv = RFECV(estimator=bst, step=1, cv=3, scoring="roc_auc")
# rfecv.fit(X, y)
# # Multi-class classification
# X, y = load_iris(return_X_y=True)
# bst = RayXGBClassifier(
# base_score=0.4,
# booster="gblinear",
# learning_rate=0.1,
# n_estimators=10,
# objective="multi:softprob",
# random_state=0,
# reg_alpha=0.001,
# reg_lambda=0.01,
# scale_pos_weight=0.5,
# verbosity=0,
# use_label_encoder=False,
# )
# rfecv = RFECV(estimator=bst, step=1, cv=3, scoring="neg_log_loss")
# rfecv.fit(X, y)
# X[0:4, :] = np.nan # verify scikit_learn doesn't throw with nan
# reg = RayXGBRegressor()
# rfecv = RFECV(estimator=reg)
# rfecv.fit(X, y)
# cls = RayXGBClassifier(use_label_encoder=False)
# rfecv = RFECV(
# estimator=cls, step=1, cv=3, scoring="neg_mean_squared_error")
# rfecv.fit(X, y)
def test_XGBClassifier_resume(self):
self._init_ray()
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import log_loss
with TemporaryDirectory() as tempdir:
model1_path = os.path.join(tempdir, "test_XGBClassifier.model")
model1_booster_path = os.path.join(tempdir,
"test_XGBClassifier.booster")
X, Y = load_breast_cancer(return_X_y=True)
model1 = RayXGBClassifier(
learning_rate=0.3, random_state=0, n_estimators=8)
model1.fit(X, Y)
pred1 = model1.predict(X)
log_loss1 = log_loss(pred1, Y)
# file name of stored xgb model
model1.save_model(model1_path)
model2 = RayXGBClassifier(
learning_rate=0.3, random_state=0, n_estimators=8)
model2.fit(X, Y, xgb_model=model1_path)
pred2 = model2.predict(X)
log_loss2 = log_loss(pred2, Y)
assert np.any(pred1 != pred2)
assert log_loss1 > log_loss2
# file name of 'Booster' instance Xgb model
model1.get_booster().save_model(model1_booster_path)
model2 = RayXGBClassifier(
learning_rate=0.3, random_state=0, n_estimators=8)
model2.fit(X, Y, xgb_model=model1_booster_path)
pred2 = model2.predict(X)
log_loss2 = log_loss(pred2, Y)
assert np.any(pred1 != pred2)
assert log_loss1 > log_loss2
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 0, 0),
f"not supported in xgb version {xgb.__version__}")
def test_constraint_parameters(self):
self._init_ray()
reg = RayXGBRegressor(interaction_constraints="[[0, 1], [2, 3, 4]]")
X = np.random.randn(10, 10)
y = np.random.randn(10)
reg.fit(X, y)
config = json.loads(reg.get_booster().save_config())
assert (config["learner"]["gradient_booster"]["updater"]["prune"][
"train_param"]["interaction_constraints"] == "[[0, 1], [2, 3, 4]]")
# TODO check why this is not working (output is empty, probably due to Ray)
# def test_parameter_validation(self):
# self._init_ray()
# reg = RayXGBRegressor(foo='bar', verbosity=1)
# X = np.random.randn(10, 10)
# y = np.random.randn(10)
# out = io.StringIO()
# err = io.StringIO()
# with redirect_stdout(out), redirect_stderr(err):
# reg.fit(X, y)
# output = out.getvalue().strip()
# print(output)
# assert output.find('foo') != -1
# reg = RayXGBRegressor(n_estimators=2,
# missing=3,
# importance_type='gain',
# verbosity=1)
# X = np.random.randn(10, 10)
# y = np.random.randn(10)
# out = io.StringIO()
# err = io.StringIO()
# with redirect_stdout(out), redirect_stderr(err):
# reg.fit(X, y)
# output = out.getvalue().strip()
# assert len(output) == 0
# def test_deprecate_position_arg(self):
# self._init_ray()
# from sklearn.datasets import load_digits
# X, y = load_digits(return_X_y=True, n_class=2)
# w = y
# with self.assertWarns(FutureWarning):
# RayXGBRegressor(3, learning_rate=0.1)
# model = RayXGBRegressor(n_estimators=1)
# with self.assertWarns(FutureWarning):
# model.fit(X, y, w)
# with self.assertWarns(FutureWarning):
# RayXGBClassifier(1, use_label_encoder=False)
# model = RayXGBClassifier(n_estimators=1, use_label_encoder=False)
# with self.assertWarns(FutureWarning):
# model.fit(X, y, w)
# with self.assertWarns(FutureWarning):
# RayXGBRanker("rank:ndcg", learning_rate=0.1)
# model = RayXGBRanker(n_estimators=1)
# group = np.repeat(1, X.shape[0])
# with self.assertWarns(FutureWarning):
# model.fit(X, y, group)
# with self.assertWarns(FutureWarning):
# RayXGBRFRegressor(1, learning_rate=0.1)
# model = RayXGBRFRegressor(n_estimators=1)
# with self.assertWarns(FutureWarning):
# model.fit(X, y, w)
# with self.assertWarns(FutureWarning):
# RayXGBRFClassifier(1, use_label_encoder=True)
# model = RayXGBRFClassifier(n_estimators=1)
# with self.assertWarns(FutureWarning):
# model.fit(X, y, w)
def test_pandas_input(self):
self._init_ray()
import pandas as pd
from sklearn.calibration import CalibratedClassifierCV
rng = np.random.RandomState(self.seed)
kRows = 100
kCols = 6
X = rng.randint(low=0, high=2, size=kRows * kCols)
X = X.reshape(kRows, kCols)
df = pd.DataFrame(X)
feature_names = []
for i in range(1, kCols):
feature_names += ["k" + str(i)]
df.columns = ["status"] + feature_names
target = df["status"]
train = df.drop(columns=["status"])
model = RayXGBClassifier()
model.fit(train, target)
clf_isotonic = CalibratedClassifierCV(
model, cv="prefit", method="isotonic")
clf_isotonic.fit(train, target)
assert isinstance(
clf_isotonic.calibrated_classifiers_[0].base_estimator,
RayXGBClassifier,
)
self.assertTrue(
np.allclose(np.array(clf_isotonic.classes_), np.array([0, 1])))
# def run_feature_weights(self, X, y, fw, model=RayXGBRegressor):
# with TemporaryDirectory() as tmpdir:
# colsample_bynode = 0.5
# reg = model(
# tree_method='hist', colsample_bynode=colsample_bynode
# )
# reg.fit(X, y, feature_weights=fw)
# model_path = os.path.join(tmpdir, 'model.json')
# reg.save_model(model_path)
# with open(model_path) as fd:
# model = json.load(fd)
# parser_path = os.path.join(tm.PROJECT_ROOT, 'demo', 'json-model',
# 'json_parser.py')
# spec = importlib.util.spec_from_file_location(
# "JsonParser", parser_path)
# foo = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(foo)
# model = foo.Model(model)
# splits = {}
# total_nodes = 0
# for tree in model.trees:
# n_nodes = len(tree.nodes)
# total_nodes += n_nodes
# for n in range(n_nodes):
# if tree.is_leaf(n):
# continue
# if splits.get(tree.split_index(n), None) is None:
# splits[tree.split_index(n)] = 1
# else:
# splits[tree.split_index(n)] += 1
# od = collections.OrderedDict(sorted(splits.items()))
# tuples = [(k, v) for k, v in od.items()]
# k, v = list(zip(*tuples))
# w = np.polyfit(k, v, deg=1)
# return w
# def test_feature_weights(self):
# kRows = 512
# kCols = 64
# X = self.rng.randn(kRows, kCols)
# y = self.rng.randn(kRows)
# fw = np.ones(shape=(kCols, ))
# for i in range(kCols):
# fw[i] *= float(i)
# poly_increasing = self.run_feature_weights(X, y, fw, RayXGBRegressor)
# fw = np.ones(shape=(kCols, ))
# for i in range(kCols):
# fw[i] *= float(kCols - i)
# poly_decreasing = self.run_feature_weights(X, y, fw, RayXGBRegressor)
# # Approxmated test, this is dependent on the implementation of random
# # number generator in std library.
# assert poly_increasing[0] > 0.08
# assert poly_decreasing[0] < -0.08
def run_boost_from_prediction(self, tree_method):
from sklearn.datasets import load_breast_cancer
X, y = load_breast_cancer(return_X_y=True)
model_0 = RayXGBClassifier(
learning_rate=0.3,
random_state=0,
n_estimators=4,
tree_method=tree_method,
)
model_0.fit(X=X, y=y)
margin = model_0.predict(X, output_margin=True)
model_1 = RayXGBClassifier(
learning_rate=0.3,
random_state=0,
n_estimators=4,
tree_method=tree_method,
)
model_1.fit(X=X, y=y, base_margin=margin)
predictions_1 = model_1.predict(X, base_margin=margin)
cls_2 = RayXGBClassifier(
learning_rate=0.3,
random_state=0,
n_estimators=8,
tree_method=tree_method,
)
cls_2.fit(X=X, y=y)
predictions_2 = cls_2.predict(X)
assert np.all(predictions_1 == predictions_2)
    def boost_from_prediction(self, tree_method):
        """Ray-initialized wrapper around ``run_boost_from_prediction``.

        NOTE(review): the ``test_boost_from_prediction_*`` methods below call
        ``run_boost_from_prediction`` directly, so this wrapper appears
        unused — confirm before removing.
        """
        self._init_ray()
        self.run_boost_from_prediction(tree_method)
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 0, 0),
f"not supported in xgb version {xgb.__version__}")
def test_boost_from_prediction_hist(self):
self.run_boost_from_prediction("hist")
@unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 2, 0),
f"not supported in xgb version {xgb.__version__}")
def test_boost_from_prediction_approx(self):
self.run_boost_from_prediction("approx")
# Updater `grow_colmaker` or `exact` tree method doesn't support
# distributed training
def test_boost_from_prediction_exact(self):
with self.assertRaises(ValueError):
self.run_boost_from_prediction("exact")
    @unittest.skipIf(XGBOOST_VERSION_TUPLE < (1, 4, 0),
                     f"not supported in xgb version {xgb.__version__}")
    def test_estimator_type(self):
        """``_estimator_type`` tags are correct and enforced on model load."""
        self._init_ray()
        assert RayXGBClassifier._estimator_type == "classifier"
        assert RayXGBRFClassifier._estimator_type == "classifier"
        assert RayXGBRegressor._estimator_type == "regressor"
        assert RayXGBRFRegressor._estimator_type == "regressor"
        assert RayXGBRanker._estimator_type == "ranker"
        from sklearn.datasets import load_digits
        X, y = load_digits(n_class=2, return_X_y=True)
        cls = RayXGBClassifier(n_estimators=2).fit(X, y)
        with tempfile.TemporaryDirectory() as tmpdir:
            path = os.path.join(tmpdir, "cls.json")
            cls.save_model(path)
            reg = RayXGBRegressor()
            # Loading a classifier checkpoint into a regressor must fail.
            with self.assertRaises(TypeError):
                reg.load_model(path)
            cls = RayXGBClassifier()
            cls.load_model(path)  # no error
if __name__ == "__main__":
    # Run this file's tests under pytest when executed directly.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| 35.917628
| 142
| 0.595266
|
34cf93f7e9573765c178e1cb4a839324a5421ee0
| 9,386
|
py
|
Python
|
recipe/tests/test_recipe_api.py
|
Taythaytu0905/Django_Advance
|
e63e34787d3c3a648c2f434a8c99d81e347b7b6a
|
[
"MIT"
] | null | null | null |
recipe/tests/test_recipe_api.py
|
Taythaytu0905/Django_Advance
|
e63e34787d3c3a648c2f434a8c99d81e347b7b6a
|
[
"MIT"
] | null | null | null |
recipe/tests/test_recipe_api.py
|
Taythaytu0905/Django_Advance
|
e63e34787d3c3a648c2f434a8c99d81e347b7b6a
|
[
"MIT"
] | null | null | null |
import tempfile
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def image_url(recipe_id):
    """Build the image-upload URL for the recipe with the given id."""
    upload_args = [recipe_id]
    return reverse('recipe:recipe-upload-image', args=upload_args)
def detail_url(recipe_id):
    """Build the detail-view URL for the recipe with the given id."""
    route_args = [recipe_id]
    return reverse('recipe:recipe-detail', args=route_args)
def sample_tag(user, name='Sample tag'):
    """Create a Tag owned by ``user`` and return it."""
    return Tag.objects.create(name=name, user=user)
def sample_ingredient(user, name='Sample ingredient'):
    """Create an Ingredient owned by ``user`` and return it."""
    return Ingredient.objects.create(name=name, user=user)
def sample_recipe(user, **params):
    """Create and return a Recipe for ``user``.

    Keyword arguments override the built-in defaults (title,
    time_minutes, price).
    """
    fields = dict(title='Sample recipe', time_minutes=10, price=5.00)
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicRecipesApiTests(TestCase):
    """Test the public available recipes API"""
    def setUp(self):
        # Unauthenticated client: force_authenticate is deliberately not used.
        self.client = APIClient()
    def test_login_required(self):
        """Test that login is required for retrieving recipes"""
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipesApiTests(TestCase):
"""Test the authorized user recipe API"""
    def setUp(self):
        """Create a user and an API client pre-authenticated as that user."""
        self.user = get_user_model().objects.create_user(
            'do@gmail.com', 'testpass')
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
def test_retrieve_recipes(self):
"""Test retrieving recipe"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test that recipe returned are for the authenticated user"""
user2 = get_user_model().objects.create_user(
'do1@gmail.com', 'testpass')
sample_recipe(user=user2)
recipe = sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['title'], recipe.title)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
res = self.client.get(detail_url(recipe.id))
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': "Chocolate",
'time_minutes': 30,
'price': 50
}
res = self.client.post(RECIPES_URL, payload)
recipe = Recipe.objects.get(id=res.data['id'])
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': "Chocolate",
'time_minutes': 30,
'price': 50.00,
"tags": [tag1.id, tag2.id]
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredient(self):
"""Test creating a recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Vegan')
ingredient2 = sample_ingredient(user=self.user, name='Dessert')
payload = {
'title': "Chocolate",
'time_minutes': 30,
'price': 50.00,
"ingredients": [ingredient1.id, ingredient2.id]
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name="Nguyen Thanh Do")
payload = {"title": "Title test", "tags": [new_tag.id]}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_update_recipe(self):
"""Test updating a recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spaghetti carbonara',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
    """Tests for the recipe image-upload endpoint (plus list filtering)."""
    # NOTE(review): the two test_filter_* methods below exercise list
    # filtering, not image upload — they look like they belong in
    # PrivateRecipesApiTests; confirm before moving.
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'do@gmail.com', 'testpass')
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
        self.recipe = sample_recipe(user=self.user)
    def tearDown(self):
        # Delete any uploaded image file so test runs don't leak files.
        self.recipe.image.delete()
    def test_upload_image_to_recipe(self):
        """Test uploading an image to a recipe"""
        url = image_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='JPEG')
            # Rewind so the POST reads the file from the start.
            ntf.seek(0)
            res = self.client.post(url, {"image": ntf}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
    def test_upload_image_bad_request(self):
        """Test uploading an invalid image"""
        url = image_url(self.recipe.id)
        res = self.client.post(url, {"image": "no_image"}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    def test_filter_recipes_by_tag(self):
        """Test returning recipes with specific tags"""
        recipe1 = sample_recipe(user=self.user, title='Do')
        recipe2 = sample_recipe(user=self.user, title='Tin')
        tag1 = sample_tag(user=self.user, name="Test tag")
        tag2 = sample_tag(user=self.user, name="Test tag2")
        recipe1.tags.add(tag1)
        recipe2.tags.add(tag2)
        # recipe3 has no tags, so it must be excluded by the filter.
        recipe3 = sample_recipe(user=self.user, title="Test 3")
        res = self.client.get(RECIPES_URL, {"tags": f'{tag1.id},{tag2.id}'})
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
    def test_filter_recipes_by_ingredients(self):
        """Test returning recipes with specific ingredients"""
        recipe1 = sample_recipe(user=self.user, title='Do')
        recipe2 = sample_recipe(user=self.user, title='Tin')
        ingredient1 = sample_ingredient(user=self.user,
                                        name="Test ingredient")
        ingredient2 = sample_ingredient(user=self.user,
                                        name="Test ingredient2")
        recipe1.ingredients.add(ingredient1)
        recipe2.ingredients.add(ingredient2)
        # recipe3 has no ingredients, so it must be excluded by the filter.
        recipe3 = sample_recipe(user=self.user, title="Test 3")
        res = self.client.get(RECIPES_URL, {
            "ingredients": f'{ingredient1.id},{ingredient2.id}'})
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
| 38.467213
| 78
| 0.645003
|
a6e63fbfeb3d6ad5bd2ca0b65c9e93042ef5cbb7
| 9,923
|
py
|
Python
|
src/abaqus/Load/ConcentratedForce.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Load/ConcentratedForce.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Load/ConcentratedForce.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
import typing
from abaqusConstants import *
from .Load import Load
from ..Region.Region import Region
class ConcentratedForce(Load):
    """The ConcentratedForce object defines a concentrated force.
    The ConcentratedForce object is derived from the Load object.
    Attributes
    ----------
    name: str
        A String specifying the load repository key.
    distributionType: SymbolicConstant
        A SymbolicConstant specifying how the load is distributed spatially. Possible values are
        UNIFORM and FIELD. The default value is UNIFORM.
    follower: Boolean
        A Boolean specifying whether the direction of the force rotates with the rotation at
        each node of the region. You should provide the **follower** argument only if it is valid
        for the specified step. The default value is OFF.
    localCsys: int
        None or a :py:class:`~abaqus.Datum.DatumCsys.DatumCsys` object specifying the local coordinate system of the load's degrees
        of freedom. If **localCsys=None**, the degrees of freedom are defined in the global
        coordinate system. When this member is queried, it returns an Int. The default value is
        None.
    field: str
        A String specifying the name of the :py:class:`~abaqus.Field.AnalyticalField.AnalyticalField` object associated with this load.
        The **field** argument applies only when **distributionType=FIELD**. The default value is an
        empty string.
    region: Region
        A :py:class:`~abaqus.Region.Region.Region` object specifying the region to which the load is applied.
    Notes
    -----
    This object can be accessed by:
    .. code-block:: python
        import load
        mdb.models[name].loads[name]
    """
    # A String specifying the load repository key.
    name: str = ''
    # A SymbolicConstant specifying how the load is distributed spatially. Possible values are
    # UNIFORM and FIELD. The default value is UNIFORM.
    distributionType: SymbolicConstant = UNIFORM
    # A Boolean specifying whether the direction of the force rotates with the rotation at
    # each node of the region. You should provide the *follower* argument only if it is valid
    # for the specified step. The default value is OFF.
    follower: Boolean = OFF
    # None or a DatumCsys object specifying the local coordinate system of the load's degrees
    # of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
    # coordinate system. When this member is queried, it returns an Int. The default value is
    # None.
    localCsys: int = None
    # A String specifying the name of the AnalyticalField object associated with this load.
    # The *field* argument applies only when *distributionType*=FIELD. The default value is an
    # empty string.
    field: str = ''
    # A Region object specifying the region to which the load is applied.
    region: Region = Region()
    def __init__(self, name: str, createStepName: str, region: Region,
                 distributionType: SymbolicConstant = UNIFORM, field: str = '', cf1: float = None,
                 cf2: float = None, cf3: float = None, amplitude: str = UNSET, follower: Boolean = OFF,
                 localCsys: int = None):
        """This method creates a ConcentratedForce object.
        Notes
        -----
        This function can be accessed by:
        .. code-block:: python
            mdb.models[name].ConcentratedForce
        Parameters
        ----------
        name
            A String specifying the load repository key.
        createStepName
            A String specifying the name of the step in which the load is created.
        region
            A Region object specifying the region to which the load is applied.
        distributionType
            A SymbolicConstant specifying how the load is distributed spatially. Possible values are
            UNIFORM and FIELD. The default value is UNIFORM.
        field
            A String specifying the name of the AnalyticalField object associated with this load.
            The *field* argument applies only when *distributionType*=FIELD. The default value is an
            empty string.
        cf1
            A Float or a Complex specifying the concentrated force component in the 1-direction.
            Although *cf1*, *cf2*, and *cf3* are optional arguments, at least one of them must be
            nonzero.
        cf2
            A Float or a Complex specifying the concentrated force component in the 2-direction.
        cf3
            A Float or a Complex specifying the concentrated force component in the 3-direction.
        amplitude
            A String or the SymbolicConstant UNSET specifying the name of the amplitude reference.
            UNSET should be used if the load has no amplitude reference. The default value is UNSET.
            You should provide the *amplitude* argument only if it is valid for the specified step.
        follower
            A Boolean specifying whether the direction of the force rotates with the rotation at
            each node of the region. You should provide the *follower* argument only if it is valid
            for the specified step. The default value is OFF.
        localCsys
            None or a DatumCsys object specifying the local coordinate system of the load's degrees
            of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
            coordinate system. When this member is queried, it returns an Int. The default value is
            None.
        Returns
        -------
        A ConcentratedForce object.
        """
        # NOTE(review): body is a stub — presumably the real implementation is
        # supplied by the Abaqus kernel at runtime; this class documents the API.
        super().__init__()
        pass
    def setValues(self, distributionType: SymbolicConstant = UNIFORM, field: str = '', cf1: float = None,
                  cf2: float = None, cf3: float = None, amplitude: str = UNSET, follower: Boolean = OFF,
                  localCsys: int = None):
        """This method modifies the data for an existing ConcentratedForce object in the step where
        it is created.
        Parameters
        ----------
        distributionType
            A SymbolicConstant specifying how the load is distributed spatially. Possible values are
            UNIFORM and FIELD. The default value is UNIFORM.
        field
            A String specifying the name of the AnalyticalField object associated with this load.
            The *field* argument applies only when *distributionType*=FIELD. The default value is an
            empty string.
        cf1
            A Float or a Complex specifying the concentrated force component in the 1-direction.
            Although *cf1*, *cf2*, and *cf3* are optional arguments, at least one of them must be
            nonzero.
        cf2
            A Float or a Complex specifying the concentrated force component in the 2-direction.
        cf3
            A Float or a Complex specifying the concentrated force component in the 3-direction.
        amplitude
            A String or the SymbolicConstant UNSET specifying the name of the amplitude reference.
            UNSET should be used if the load has no amplitude reference. The default value is UNSET.
            You should provide the *amplitude* argument only if it is valid for the specified step.
        follower
            A Boolean specifying whether the direction of the force rotates with the rotation at
            each node of the region. You should provide the *follower* argument only if it is valid
            for the specified step. The default value is OFF.
        localCsys
            None or a DatumCsys object specifying the local coordinate system of the load's degrees
            of freedom. If *localCsys*=None, the degrees of freedom are defined in the global
            coordinate system. When this member is queried, it returns an Int. The default value is
            None.
        """
        # NOTE(review): stub body — see __init__.
        pass
    def setValuesInStep(self, stepName: str,
                        cf1: typing.Union[SymbolicConstant, float] = None,
                        cf2: typing.Union[SymbolicConstant, float] = None,
                        cf3: typing.Union[SymbolicConstant, float] = None,
                        amplitude: str = ''):
        """This method modifies the propagating data for an existing ConcentratedForce object in
        the specified step.
        Parameters
        ----------
        stepName
            A String specifying the name of the step in which the load is modified.
        cf1
            A Float, a Complex, or the SymbolicConstant UNCHANGED specifying the concentrated force
            component in the 1-direction. UNCHANGED should be used if the concentrated force
            component is propagated from the previous analysis step.
        cf2
            A Float, a Complex, or the SymbolicConstant UNCHANGED specifying the concentrated force
            component in the 2-direction. UNCHANGED should be used if the concentrated force
            component is propagated from the previous analysis step.
        cf3
            A Float, a Complex, or the SymbolicConstant UNCHANGED specifying the concentrated force
            component in the 3-direction. UNCHANGED should be used if the concentrated force
            component is propagated from the previous analysis step.
        amplitude
            A String or a SymbolicConstant specifying the name of the amplitude reference. Possible
            values for the SymbolicConstant are UNCHANGED and FREED. UNCHANGED should be used if the
            amplitude is propagated from the previous analysis step. FREED should be used if the
            load is changed to have no amplitude reference. You should provide the *amplitude*
            argument only if it is valid for the specified step.
        """
        # NOTE(review): stub body — see __init__.
        pass
| 49.368159
| 135
| 0.657059
|
e4fba515382456ff1defc8c6eee542eebb5a9466
| 524
|
py
|
Python
|
hms-gateway/scratch-space/configuration-examples/ini_config.py
|
PacktPublishing/Hands-On-Software-Engineering-with-Python
|
b1b57f8989d5e5a40ebcdebf5998324c501a15d9
|
[
"MIT"
] | 40
|
2019-01-03T15:23:45.000Z
|
2022-03-07T10:15:42.000Z
|
hms-gateway/scratch-space/configuration-examples/ini_config.py
|
PacktPublishing/Hands-On-Software-Engineering-with-Python
|
b1b57f8989d5e5a40ebcdebf5998324c501a15d9
|
[
"MIT"
] | 2
|
2020-04-15T16:40:08.000Z
|
2021-03-26T00:18:52.000Z
|
hms-gateway/scratch-space/configuration-examples/ini_config.py
|
PacktPublishing/Hands-On-Software-Engineering-with-Python
|
b1b57f8989d5e5a40ebcdebf5998324c501a15d9
|
[
"MIT"
] | 35
|
2018-08-07T09:00:19.000Z
|
2022-03-19T22:10:37.000Z
|
#!/usr/bin/env python
"""Dump every section, key and value of myservice.ini to stdout."""
import configparser

config = configparser.ConfigParser()
# read() silently ignores a missing file, leaving only the DEFAULT section.
config.read('myservice.ini')

for section_name in config:
    # Section banner padded to 80 columns with dashes.
    print(('-- %s ' % section_name).ljust(80, '-'))
    section = config[section_name]
    for key in section:
        value = section.get(key)
        label = (' + %s ' % key).ljust(24, '.')
        print(label + ' (%s) %s' % (type(value).__name__, value))
    print()
| 23.818182
| 49
| 0.526718
|
4139d5e61bf6a1e5ceafd2ccbd069f44d67567a1
| 300
|
py
|
Python
|
atbash/core.py
|
alecbush/python-atbash-cipher-cli
|
d463660f3c8dd353ae7983bf99446a12d00c665d
|
[
"MIT"
] | null | null | null |
atbash/core.py
|
alecbush/python-atbash-cipher-cli
|
d463660f3c8dd353ae7983bf99446a12d00c665d
|
[
"MIT"
] | null | null | null |
atbash/core.py
|
alecbush/python-atbash-cipher-cli
|
d463660f3c8dd353ae7983bf99446a12d00c665d
|
[
"MIT"
] | null | null | null |
def cipher(text=''):
    """Apply the Atbash cipher to *text* and return the enciphered string.

    Non-alphabetic characters pass through unchanged (see cipher_char).
    """
    # Strings are already iterable character-by-character; the original
    # list(str(text)) made a redundant intermediate copy.
    return ''.join(cipher_char(char) for char in str(text))
def cipher_char(char=' '):
    """Return the Atbash mirror of a single character.

    Letters map a<->z, b<->y, ... (case preserved); anything else is
    returned unchanged.
    """
    if not char.isalpha():
        return char
    base = ord('A') if char.isupper() else ord('a')
    # Mirror the 0..25 alphabet index around its midpoint.
    index = ord(char) - base
    return chr(base + (25 - index))
| 23.076923
| 53
| 0.576667
|
3cdfa9cfa2af174d74d36e3e8342036b9c6d8b8c
| 642
|
py
|
Python
|
main.py
|
aejester/autoclicker
|
17139a56b81b2ebe96a0b530639b63e88c6233b3
|
[
"MIT"
] | null | null | null |
main.py
|
aejester/autoclicker
|
17139a56b81b2ebe96a0b530639b63e88c6233b3
|
[
"MIT"
] | null | null | null |
main.py
|
aejester/autoclicker
|
17139a56b81b2ebe96a0b530639b63e88c6233b3
|
[
"MIT"
] | null | null | null |
from pynput.keyboard import Key, Controller
from pynput import keyboard
print("autoclicker v1.0.0")
print("by aejester (https://github.com/aejester)\n\n")
def choice():
    """Print the main menu and return the user's selection as an int.

    Raises ValueError if the input is not an integer. Note the local
    variable deliberately shadows the function name; harmless here.
    """
    print("""Choose an option:
    1: Run a profile
    2: Create a profile
    3: Remove a profile
    4: Edit setting
    """)
    choice = input("> ")
    return int(choice)
# Main menu loop: prompt, then dispatch on the chosen option.
# NOTE(review): only option 1 is handled in the visible code — the file may
# be truncated; confirm options 2-4 against the repository.
while True:
    initial = choice()
    if initial == 1:
        # Use a context manager so the profiles file is always closed
        # (the original open() handle was never closed).
        with open(".profiles", "r") as profiles_file:
            profiles = profiles_file.read().split("\n")
        print("Select a profile: ")
        # Bug fix: the original initialised i = 0 but never incremented it,
        # so every profile in the menu was numbered "1".
        for i, profile in enumerate(profiles):
            print("\t" + str(i + 1) + ": " + profile)
        profile = int(input("> ")) - 1
| 19.454545
| 60
| 0.570093
|
945253a47cd1ddb17be974975dfab26934e308fb
| 3,108
|
py
|
Python
|
scripts/collection_copy.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 2
|
2020-02-07T18:12:12.000Z
|
2020-02-11T14:59:03.000Z
|
scripts/collection_copy.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | 173
|
2020-01-29T17:48:02.000Z
|
2020-03-20T02:52:58.000Z
|
scripts/collection_copy.py
|
HumanCellAtlas/dcp-prototype
|
44ca66a266004124f39d7d3e3dd75e9076012ff0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import json
import logging
import os
import sys
import click
import requests
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) # noqa
sys.path.insert(0, pkg_root) # noqa
logging.basicConfig()
logger = logging.getLogger(__name__)
@click.group()
@click.option("--deployment", default="test", show_default=True, help="The name of the deployment to target.")
@click.pass_context
def cli(ctx, deployment):
    """Root command group: record the target deployment for subcommands."""
    os.environ["DEPLOYMENT_STAGE"] = deployment
    # Map deployment name -> API base URL used by subcommands.
    api_base = {
        "dev": "https://api.dev.single-cell.czi.technology/db/v1",
        "staging": "https://api.stage.single-cell.czi.technology/dp/v1",
    }
    # NOTE(review): the option default is "test" but api_base has no "test"
    # key, so running without --deployment raises KeyError here — confirm the
    # intended default. Also "dev" uses "/db/v1" while "staging" uses
    # "/dp/v1"; looks like a typo — verify against the API.
    ctx.obj["api_base"] = api_base[deployment]
@cli.command()
@click.argument("uuid")
@click.argument("cookie")
@click.pass_context
def copy_collection(ctx, uuid, cookie):
    """
    Copy a collection from prod to the passed in deployment. Note that the collection owner
    will be set based on the passed in cookie. To retrieve a cookie, sign in (through the website) to the deployment
    you are copying the collection to and copy the cookie (starting with cxguser=) from the request header
    """
    # Fetch the source collection from the production API.
    response = requests.get(f"https://api.cellxgene.cziscience.com/dp/v1/collections/{uuid}")
    # Bug fix: response._content is a private requests attribute; use the
    # public Response.json() helper instead of json.loads(response._content).
    body = response.json()
    # Collect the H5AD asset of every dataset in the source collection.
    dataset_assets = []
    for dataset in body["datasets"]:
        for asset in dataset["dataset_assets"]:
            if asset["filetype"] == "H5AD":
                dataset_assets.append({"asset_id": asset["id"], "dataset_id": asset["dataset_id"]})
    # Rebuild the collection metadata payload for the target deployment.
    collection_metadata = {
        "contact_email": body["contact_email"],
        "contact_name": body["contact_name"],
        "data_submission_policy_version": body["data_submission_policy_version"],
        "description": body["description"],
        "links": [],
        "name": body["name"],
    }
    for link in body["links"]:
        collection_metadata["links"].append(
            {"link_name": link["link_name"], "link_type": link["link_type"], "link_url": link["link_url"]}
        )
    headers = {"Content-Type": "application/json", "Cookie": cookie, "accept": "application/json"}
    # Create the new (empty) collection on the target deployment.
    response = requests.post(
        f"{ctx.obj['api_base']}/collections/", headers=headers, data=json.dumps(collection_metadata)
    )
    new_collection_uuid = response.json()["collection_uuid"]
    # For each asset: get a presigned download URL from prod, then hand that
    # URL to the target deployment's upload-links endpoint.
    for asset in dataset_assets:
        response = requests.post(
            f"https://api.cellxgene.cziscience.com/dp/v1/datasets/{asset['dataset_id']}/asset/{asset['asset_id']}"
        )
        presigned_s3_uri = response.json()["presigned_url"]
        dataset_body = {"dataset_uuid": "", "url": presigned_s3_uri}
        response = requests.post(
            f"{ctx.obj['api_base']}/collections/{new_collection_uuid}/upload-links",
            headers=headers,
            data=json.dumps(dataset_body),
        )
        dataset_uuid = response.json()
        click.echo(f"New Collection_uuid: {new_collection_uuid}, new dataset_uuid: {dataset_uuid}")
if __name__ == "__main__":
    # Entry point: invoke the click group with a fresh shared context object.
    cli(obj={})
| 36.139535
| 116
| 0.664093
|
bb7094ab87e6dd923dfcedc7075368012b3fd679
| 2,693
|
py
|
Python
|
lib/network.py
|
jabbalaci/Bash-Utils
|
c6fb115834a221c4aaba8eaa37f650beea45ef29
|
[
"MIT"
] | 73
|
2015-03-31T01:12:26.000Z
|
2021-07-10T19:45:04.000Z
|
lib/network.py
|
doc22940/Bash-Utils
|
c6fb115834a221c4aaba8eaa37f650beea45ef29
|
[
"MIT"
] | 2
|
2017-01-06T17:17:42.000Z
|
2017-08-23T18:35:55.000Z
|
lib/network.py
|
doc22940/Bash-Utils
|
c6fb115834a221c4aaba8eaa37f650beea45ef29
|
[
"MIT"
] | 27
|
2015-01-03T18:51:23.000Z
|
2020-11-15T11:49:51.000Z
|
#!/usr/bin/env python3
"""
Network-related stuff.
# from jplib import network
"""
import json
import re
import socket
from lib import process
from lib.process import get_return_code_of_simple_cmd
from lib.web import get_page
from six.moves.urllib import error, request
URL = 'http://www.google.com'
def is_internet_on(method=1, debug=False):
    """Check if the Internet connection is on.

    method 1: fetch google.com and verify its <title> (defeats captive
              portals that redirect to a login page).
    method 2: plain urlopen with a 3-second timeout.
    method 3: a single ICMP ping via the system ping command.

    Returns a bool; an unknown *method* prints a warning and returns False
    (the original fell through and returned None implicitly — both falsy,
    but an explicit bool is a cleaner contract).
    """
    if method == 1:
        # At my current place we have a wifi that redirects to a login page,
        # so we always have a connection. That's why I check the content of
        # the fetched webpage.
        text = get_page(URL, timeout=3, debug=debug)
        if text:
            if '<title>Google</title>' in text:
                return True
        # else:
        return False
    elif method == 2:
        # http://stackoverflow.com/questions/3764291/checking-network-connection
        try:
            request.urlopen('http://www.google.com', timeout=3)
            return True
        except error.URLError:
            return False
        except socket.timeout:
            return False
    elif method == 3:
        cmd = "ping -c 1 www.google.com"
        return get_return_code_of_simple_cmd(cmd) == 0
    else:
        print('# warning: unknown method in is_internet_on()')
        return False
def get_my_external_ip():
    """Return this machine's external IP as a string, or None on failure.

    Local IP: http://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
    """
    content = get_page("http://jsonip.com/")
    if not content:
        return None
    return json.loads(content)["ip"]
def ping(host, cnt=1):
    """Ping *host* *cnt* times and return the average round-trip time in ms.

    Returns None if the summary line cannot be parsed (e.g. host down).
    """
    cmd = 'ping -c {cnt} {url}'.format(url=host, cnt=cnt)
    output = [x for x in process.get_simple_cmd_output(cmd).split('\n') if x]
    # The last non-empty line looks like:
    #   rtt min/avg/max/mdev = 14.1/15.2/16.3/0.5 ms
    result = re.search('min/avg/max/mdev = (.*)/(.*)/(.*)/(.*) ms', output[-1])
    if result:
        # Bug fix: group(1) is the *min* field; the documented average is
        # the second field, group(2).
        return float('{0:.2f}'.format(float(result.group(2))))
    else:
        return None
def fping(host, cnt=1):
    """Return the average ping time of *host* in msec, measured with fping.

    Returns None when no sample succeeded (fping prints '-' for losses).
    """
    # Drop an optional ":port" suffix before pinging.
    host = host.split(':')[0]
    cmd = "fping {host} -C {cnt} -q".format(host=host, cnt=cnt)
    # fping -C -q reports "host : t1 t2 ..."; keep only the numeric samples.
    raw = process.get_simple_cmd_output(cmd).strip().split(':')[-1]
    samples = [float(tok) for tok in raw.split() if tok != '-']
    if samples:
        return sum(samples) / len(samples)
    return None
#############################################################################
if __name__ == "__main__":
print(is_internet_on())
print(get_my_external_ip())
#
host = 'www.google.com'
print(ping(host, 2))
print(fping(host, 2))
| 27.479592
| 107
| 0.588563
|
3c01e86a319dd9e3f51f6060e451697eb7dd923d
| 1,788
|
py
|
Python
|
snake.py
|
Lee-RoyMannier/snake_game
|
0344405b837b8402ac7ad165bc47d70d7bff6758
|
[
"MIT"
] | null | null | null |
snake.py
|
Lee-RoyMannier/snake_game
|
0344405b837b8402ac7ad165bc47d70d7bff6758
|
[
"MIT"
] | null | null | null |
snake.py
|
Lee-RoyMannier/snake_game
|
0344405b837b8402ac7ad165bc47d70d7bff6758
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 18:07:39 2022
@author: Mannier
"""
from turtle import Turtle
# Initial three segments, head first, spaced 20 px apart along the x axis.
STARTING_POSITION = [(0, 0), (-20, 0), (-40, 0)]
# Pixels per tick; matches the segment spacing so the body tracks the head.
MOVE_DISTANCE = 20
# Turtle headings in degrees.
UP = 90
DOWN = 270
LEFT = 180
RIGHT = 0
class Snake:
    """The snake: a list of turtle segments, with the head at index 0."""
    def __init__(self):
        self.segments = []
        self.create_snake()
        self.snake_head = self.segments[0]
    def create_snake(self):
        """Build the initial three-segment snake."""
        for position in STARTING_POSITION:
            self.add_segment(position)
    def add_segment(self, position):
        """Append one white square segment at *position*."""
        new_segment = Turtle("square")
        new_segment.color("white")
        new_segment.penup()
        new_segment.goto(position)
        self.segments.append(new_segment)
    def reset(self):
        """Park the old segments off-screen and rebuild a fresh snake."""
        for seg in self.segments:
            seg.goto(1000, 1000)
        self.segments.clear()
        self.create_snake()
        self.snake_head = self.segments[0]
    def extend(self):
        """Grow by one segment placed at the current tail position."""
        self.add_segment(self.segments[-1].position())
    def move(self):
        """Advance one step: each segment moves to its predecessor's spot,
        then the head moves forward."""
        for seg_num in range(len(self.segments) - 1, 0, -1):
            new_x = self.segments[seg_num - 1].xcor()
            new_y = self.segments[seg_num - 1].ycor()
            self.segments[seg_num].goto(new_x, new_y)
        self.snake_head.forward(MOVE_DISTANCE)
    # Direction handlers: a 180-degree reversal into the snake's own body is
    # rejected. Bug fix: up/right/left compared the *bound method*
    # self.snake_head.heading to an int (always unequal, so the guard never
    # blocked a reversal); heading() must be called, as down() already did.
    def up(self):
        if self.snake_head.heading() != DOWN:
            self.snake_head.setheading(UP)
    def down(self):
        if self.snake_head.heading() != UP:
            self.snake_head.setheading(DOWN)
    def right(self):
        if self.snake_head.heading() != LEFT:
            self.snake_head.setheading(RIGHT)
    def left(self):
        if self.snake_head.heading() != RIGHT:
            self.snake_head.setheading(LEFT)
| 26.686567
| 61
| 0.567114
|
e6b0d8ee6f6bb1b5424e8fad499316821f384728
| 6,569
|
py
|
Python
|
opstestfw/switch/CLI/InterfaceLacpPortIdConfig.py
|
r-cc-c/ops-ft-framework
|
06ffd5a292e9bd0398d76b91dc327bf12025f9e2
|
[
"Apache-2.0"
] | null | null | null |
opstestfw/switch/CLI/InterfaceLacpPortIdConfig.py
|
r-cc-c/ops-ft-framework
|
06ffd5a292e9bd0398d76b91dc327bf12025f9e2
|
[
"Apache-2.0"
] | null | null | null |
opstestfw/switch/CLI/InterfaceLacpPortIdConfig.py
|
r-cc-c/ops-ft-framework
|
06ffd5a292e9bd0398d76b91dc327bf12025f9e2
|
[
"Apache-2.0"
] | 1
|
2021-09-10T08:21:59.000Z
|
2021-09-10T08:21:59.000Z
|
# (C) Copyright 2015 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from opstestfw import *
# import re
# import time
def InterfaceLacpPortIdConfig(**kwargs):
    """
    Library function to configure LAG parameters on an interface

    :param deviceObj: device object
    :type deviceObj: VSwitch device object
    :param interface: interface to config
    :type interface: int
    :param lacpPortId: Range between 1 and 65535 to identify port in LACP
    :type lacpPortId: int
    :return: returnStruct object
    :rtype: object
    """
    deviceObj = kwargs.get('deviceObj', None)
    interface = kwargs.get('interface', None)
    lacpPortId = kwargs.get('lacpPortId', None)
    # Definition of the return dictionary
    returnCode = 0
    retStruct = dict()
    overallBuffer = []
    bufferString = ""
    # Check if parameters required are used, if not return error -- Constrains
    if deviceObj is None:
        LogOutput('error', "Need to pass switch device object deviceObj")
        returnCode = 1
        returnCls = returnStruct(returnCode=returnCode, buffer=overallBuffer,
                                 data=retStruct)
        return returnCls
    if interface is None:
        # Robustness fix: previously an unset interface silently produced the
        # bogus device command "interface None".
        LogOutput('error', "Need to pass the interface to configure")
        returnCode = 1
        returnCls = returnStruct(returnCode=returnCode, buffer=overallBuffer,
                                 data=retStruct)
        return returnCls
    if lacpPortId is None:
        LogOutput('error', "Need to pass the Port ID value")
        returnCode = 1
        returnCls = returnStruct(returnCode=returnCode, buffer=overallBuffer,
                                 data=retStruct)
        return returnCls
    ###########################################################################
    # Navigation to the config Context
    ###########################################################################
    # Get into vtyshell
    returnStructure = deviceObj.VtyshShell(enter=True)
    overallBuffer.append(returnStructure.buffer())
    returnCode = returnStructure.returnCode()
    if returnCode != 0:
        LogOutput('error', "Failed to get vtysh prompt")
        bufferString = ""
        for curLine in overallBuffer:
            bufferString += str(curLine)
        returnCls = returnStruct(returnCode=returnCode, buffer=bufferString,
                                 data=retStruct)
        return returnCls
    # Get into config context
    returnStructure = deviceObj.ConfigVtyShell(enter=True)
    returnCode = returnStructure.returnCode()
    overallBuffer.append(returnStructure.buffer())
    if returnCode != 0:
        LogOutput('error', "Failed to get vtysh config prompt")
        bufferString = ""
        for curLine in overallBuffer:
            bufferString += str(curLine)
        returnCls = returnStruct(returnCode=returnCode, buffer=bufferString,
                                 data=retStruct)
        return returnCls
    ###########################################################################
    # Getting into the interface
    ###########################################################################
    command = "interface " + str(interface)
    returnDevInt = deviceObj.DeviceInteract(command=command)
    retCode = returnDevInt['returnCode']
    overallBuffer.append(returnDevInt['buffer'])
    if retCode != 0:
        LogOutput('error',
                  "Failed to get the interface prompt " + deviceObj.device)
        bufferString = ""
        for curLine in overallBuffer:
            bufferString += str(curLine)
        # Bug fix: return the failing retCode; the original returned the
        # stale returnCode (0 from the vtysh steps), reporting success.
        returnCls = returnStruct(returnCode=retCode, buffer=bufferString,
                                 data=retStruct)
        return returnCls
    ###########################################################################
    # Assign a port-ID for lacp
    ###########################################################################
    # No matter if disable or enable, the value always has to be set.
    command = "lacp port-id " + str(lacpPortId)
    returnDict = deviceObj.DeviceInteract(command=command)
    retCode = returnDict['returnCode']
    result = retCode
    overallBuffer.append(returnDict['buffer'])
    # Bug fix: the module uses "from opstestfw import *", so the name
    # "opstestfw" is never bound — opstestfw.LogOutput raised NameError.
    if retCode != 0:
        LogOutput('error',
                  "Failed to configure the LACP port ID "
                  + str(lacpPortId))
    else:
        LogOutput('debug',
                  "LACP port ID assigned " + str(lacpPortId))
    ###########################################################################
    # Process of return to the Root context
    ###########################################################################
    # Get out of the interface context
    command = "exit"
    returnDict = deviceObj.DeviceInteract(command=command)
    retCode = returnDict['returnCode']
    overallBuffer.append(returnDict['buffer'])
    if retCode != 0:
        LogOutput('error', "Failed to exit the interface context")
    # Get out of config context
    returnStructure = deviceObj.ConfigVtyShell(enter=False)
    returnCode = returnStructure.returnCode()
    overallBuffer.append(returnStructure.buffer())
    if returnCode != 0:
        LogOutput('error', "Failed to get out of vtysh config context")
        bufferString = ""
        for curLine in overallBuffer:
            bufferString += str(curLine)
        returnCls = returnStruct(returnCode=1, buffer=bufferString)
        return returnCls
    # Get out of vtyshell
    returnStructure = deviceObj.VtyshShell(enter=False)
    retCode = returnStructure.returnCode()
    overallBuffer.append(returnStructure.buffer())
    if retCode != 0:
        LogOutput('error', "Failed to exit vty shell")
        bufferString = ""
        for curLine in overallBuffer:
            bufferString += str(curLine)
        returnCls = returnStruct(returnCode=1, buffer=bufferString,
                                 data=retStruct)
        return returnCls
    bufferString = ""
    for curLine in overallBuffer:
        bufferString += str(curLine)
    # Return the result of the "lacp port-id" command itself.
    returnCls = returnStruct(returnCode=result,
                             buffer=bufferString, data=retStruct)
    return returnCls
| 38.869822
| 79
| 0.587456
|
09f8e52ea0c2646226cc7366e4b5cf76f2e9e9c9
| 1,895
|
py
|
Python
|
tests/h/accounts/__init___test.py
|
discodavey/h
|
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
|
[
"MIT"
] | 2
|
2021-11-07T23:14:54.000Z
|
2021-11-17T10:11:55.000Z
|
tests/h/accounts/__init___test.py
|
0b01/h
|
d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14
|
[
"MIT"
] | 16
|
2018-03-14T21:23:46.000Z
|
2019-04-29T18:55:28.000Z
|
tests/h/accounts/__init___test.py
|
0b01/h
|
d13cbc3ec5cf92fbfb40ad360c7a5e0d937fbd14
|
[
"MIT"
] | 1
|
2021-03-12T09:45:04.000Z
|
2021-03-12T09:45:04.000Z
|
# -*- coding: utf-8 -*-
import mock
import pytest
from h import accounts
@pytest.mark.usefixtures('user_service')
class TestGetUser(object):
    """Tests for accounts.get_user()'s interaction with the 'user' service."""
    def test_fetches_user_using_service(self,
                                        factories,
                                        pyramid_config,
                                        pyramid_request,
                                        user_service):
        """get_user() should look the user up via the 'user' service,
        keyed by the authenticated userid."""
        pyramid_config.testing_securitypolicy('userid')
        user_service.fetch.return_value = factories.User.build()
        accounts.get_user(pyramid_request)
        user_service.fetch.assert_called_once_with('userid')
    def test_does_not_invalidate_session_if_not_authenticated(self,
                                                              pyramid_config,
                                                              pyramid_request):
        """
        If authenticated_userid is None it shouldn't invalidate the session.
        Even though the user with id None obviously won't exist in the db.
        This also tests that it doesn't raise a redirect in this case.
        """
        pyramid_request.session.invalidate = mock.Mock()
        accounts.get_user(pyramid_request)
        assert not pyramid_request.session.invalidate.called
    def test_returns_user(self,
                          factories,
                          pyramid_config,
                          pyramid_request,
                          user_service):
        """get_user() should return exactly the object the service fetched."""
        pyramid_config.testing_securitypolicy('userid')
        user = user_service.fetch.return_value = factories.User.build()
        result = accounts.get_user(pyramid_request)
        assert result == user
@pytest.fixture
def user_service(pyramid_config):
    """Register a stub 'user' service whose ``fetch`` returns None by default."""
    svc = mock.Mock(spec_set=['fetch'])
    svc.fetch.return_value = None
    pyramid_config.register_service(svc, name='user')
    return svc
| 32.672414
| 79
| 0.58153
|
899c2ce8cac04e5c69c931ba1da49ed7d1b25749
| 2,899
|
py
|
Python
|
fleetfmt/reader.py
|
guillermo-jimenez/fleet-format
|
da554ef9cb94cddfd1a33fb5877ae8d2ee8aa035
|
[
"MIT"
] | null | null | null |
fleetfmt/reader.py
|
guillermo-jimenez/fleet-format
|
da554ef9cb94cddfd1a33fb5877ae8d2ee8aa035
|
[
"MIT"
] | null | null | null |
fleetfmt/reader.py
|
guillermo-jimenez/fleet-format
|
da554ef9cb94cddfd1a33fb5877ae8d2ee8aa035
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Pure Storage Inc.
# SPDX-License-Identifier: Apache-2.0
# ==============================================================================
""" Fleet Format File Reader """
from fleetfmt.base import _FileAccessorBase
import io
import pyarrow as pa
import pickle
from .format import FILE_HEAD_SERDES, KEYENTRY_HEAD_SERDES, \
RECORD_HEAD_SERDES, SCHEMA_HEAD_SERDES
from .format import MAGIC
class FileReader(_FileAccessorBase):
    """Use this class with a file open in read mode to read either
    keys or records from the file.
    Methods:
        keys(): Returns all keys stored in the file.
        read(keys): Returns the record value(s) for a key or list of keys.
    """
    # NOTE(review): self._read_header() and self._read_keytable() are not
    # defined in this span -- presumably provided by _FileAccessorBase or
    # later in this module; confirm before refactoring.
    def __init__(self, fh, keymap: dict = None, schema: pa.lib.Schema = None):
        # fh: file handle opened for (binary) reading; keymap/schema may be
        # supplied to skip the lazy reads from the file itself.
        super().__init__(fh)
        self._keymap = keymap
        self._read_header()
        if schema is not None:
            self._schema = schema
        else:
            self._read_schema()
    def __enter__(self):
        # Context-manager support; closing the handle is the caller's job.
        return self
    def __exit__(self, *args, **kwargs):
        # Returning False propagates any exception raised inside the block.
        return False
    def keys(self):
        """Returns all keys stored in the Fleet file.
        Returns:
            (set): A set of keys for all records in the file.
        """
        # lazily read keytable on first call
        if self._keymap is None:
            self._read_keytable()
        return self._keymap.keys()
    def read(self, keys):
        """Returns the record value(s) for a key or list of keys.
        Args:
            keys : A single key or a list of keys whose values should be
                   read from file.
        Returns:
            The record value(s) associated with the key(s).
        """
        # A list argument yields a list of records, in the same order.
        if isinstance(keys, list):
            return [self._read_record(key) for key in keys]
        else:
            return self._read_record(keys)
    def _read_record(self, key):
        # Seek to the record's byte offset and deserialize its payload.
        # lazily read keytable on first call
        if self._keymap is None:
            self._read_keytable()
        # get record offset and seek there
        roff = self._keymap[key]
        self._fh.seek(roff)
        # read record size (bytes)
        (rsize,) = RECORD_HEAD_SERDES.from_file(self._fh)
        buf = self._fh.read(rsize)
        # deserialize using pyarrow
        # rec = pa.deserialize(buf)
        # SECURITY NOTE: pickle.loads executes arbitrary code from the file;
        # only open trusted Fleet files.
        rec = pickle.loads(buf)
        # return the non-key components of record
        return rec
    def _read_schema(self):
        # Read the pyarrow schema block stored after the file header.
        # read schema bytes
        (hsize,) = SCHEMA_HEAD_SERDES.from_file(self._fh)
        buf = self._fh.read(hsize)
        # Odd -- pyarrow.read_schema wants a readable buffer, and a bytes
        # object is insufficient. So we wrap it back up to pull out the schema
        wrap = io.BufferedReader(io.BytesIO(buf))
        self._schema = pa.ipc.read_schema(wrap)
| 28.421569
| 80
| 0.57882
|
fc53fdac294f9c5e7424305ddb267eaa29d2381f
| 849
|
py
|
Python
|
DatabaseGrabber.py
|
adamrvfisher/TechnicalAnalysisLibrary
|
38a22b2b2b5052623f81edb11b3c5460fc254e45
|
[
"Apache-2.0"
] | 3
|
2019-04-26T11:13:14.000Z
|
2020-01-10T05:58:16.000Z
|
DatabaseGrabber.py
|
adamrvfisher/TechnicalAnalysisLibrary
|
38a22b2b2b5052623f81edb11b3c5460fc254e45
|
[
"Apache-2.0"
] | null | null | null |
DatabaseGrabber.py
|
adamrvfisher/TechnicalAnalysisLibrary
|
38a22b2b2b5052623f81edb11b3c5460fc254e45
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a directory management/query tool
#Define function
def DatabaseGrabber(ticker, base_path='F:\\Users\\UserName\\DirectoryLocation'):
    """Load the pickled dataframe for *ticker* and clean it up.

    Reads ``<base_path>/<ticker>/<ticker>``, coerces every column to a
    numeric dtype (unparseable values become NaN), then drops duplicate
    columns and duplicate index rows (keeping the first occurrence).

    Parameters
    ----------
    ticker : str
        Symbol whose pickle file should be loaded.
    base_path : str, optional
        Root directory holding one sub-directory per ticker.  Defaults to
        the original hard-coded location, so existing callers behave
        exactly as before; the path is now joined portably instead of by
        backslash concatenation.

    Returns
    -------
    pandas.DataFrame
        Cleaned, numeric dataframe indexed as stored on disk.
    """
    #Import modules
    import os
    import pandas as pd
    #Assign dataframe by reading pickle
    dataframe = pd.read_pickle(os.path.join(base_path, ticker, ticker))
    #For all columns in dataframe
    for column in dataframe.columns:
        #Make numeric datatype (errors='coerce' turns junk into NaN)
        dataframe[column] = pd.to_numeric(dataframe[column], errors='coerce')
    #Erase duplicate columns
    dataframe = dataframe.loc[:, ~dataframe.columns.duplicated()]
    #Erase duplicate rows
    dataframe = dataframe[~dataframe.index.duplicated(keep='first')]
    #Output dataframe
    return dataframe
| 29.275862
| 78
| 0.639576
|
e29c0750080e9e59f9ed3425eb225967b915877d
| 6,575
|
py
|
Python
|
Lib/Scripts/font/glyphs/language coverage.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 11
|
2015-01-06T15:43:56.000Z
|
2019-07-27T00:35:20.000Z
|
Lib/Scripts/font/glyphs/language coverage.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 2
|
2017-05-17T10:11:46.000Z
|
2018-11-21T21:43:43.000Z
|
Lib/Scripts/font/glyphs/language coverage.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 4
|
2015-01-10T13:58:50.000Z
|
2019-12-18T15:40:14.000Z
|
# [h] diacritics coverage dialog
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.languages import diacritics_chars, diacritics_glyphnames, check_languages_coverage
from hTools2.modules.messages import no_font_open
# objects
class diacriticsCoverageDialog(hDialog):
    '''Dialog to check, print and create accented glyphs for selected languages.

    Builds a floating window (vanilla) with a language list, lc/uc checkboxes
    and buttons to check coverage, print the character/glyph lists, and create
    missing glyphs in the current font.  Python 2 / RoboFont environment.
    '''
    # layout + selection state (class-level defaults shared by instances)
    list_height = 180
    languages_order = diacritics_glyphnames.keys()
    languages_order.sort()
    languages_selected = []
    lc = True
    uc = True
    # width (in characters) of the '=' / '-' separators printed to the output
    n = 50
    def __init__(self):
        '''Build the window widgets and open the dialog.'''
        self.height = self.list_height + (self.text_height * 3) + (self.button_height * 2) + (self.padding_y * 7)
        self.button_width = (self.width - (self.padding_x * 2)) / 2.
        self.w = FloatingWindow(
                    (self.width, self.height),
                    "accented")
        x = self.padding_x
        y = self.padding_y
        # button : check coverage
        self.w.button_check = SquareButton(
                    (x, y,
                    -self.padding_x,
                    self.button_height),
                    'check coverage',
                    sizeStyle=self.size_style,
                    callback=self.check_callback)
        y += self.button_height + self.padding_y
        self.w.check_mode = RadioGroup(
                    (x, y,
                    -self.padding_x,
                    self.text_height),
                    ["font", "info"],
                    sizeStyle=self.size_style,
                    isVertical=False)
        self.w.check_mode.set(0)
        # languages list
        y += self.text_height + self.padding_y
        self.w.languages = List(
                    (x, y,
                    -self.padding_x,
                    self.list_height),
                    self.languages_order)
        # checkboxes
        y += self.list_height + self.padding_y
        self.w.checkbox_lc = CheckBox(
                    (x, y,
                    self.button_width,
                    self.text_height),
                    'lower',
                    value=True,
                    sizeStyle=self.size_style)
        x += self.button_width
        self.w.checkbox_uc = CheckBox(
                    (x, y,
                    self.button_width,
                    self.text_height),
                    'upper',
                    value=True,
                    sizeStyle=self.size_style)
        # button : print info
        x = self.padding_x
        y += self.text_height + self.padding_y
        self.w.button_print = SquareButton(
                    (x, y,
                    self.button_width,
                    self.button_height),
                    'print',
                    sizeStyle=self.size_style,
                    callback=self.print_callback)
        # button : make glyphs
        x += self.button_width - 1
        # y += self.button_height + self.padding_y
        self.w.button_make = SquareButton(
                    (x, y,
                    -self.padding_x,
                    self.button_height),
                    'make',
                    sizeStyle=self.size_style,
                    callback=self.make_callback)
        # checkbox : select all
        x = self.padding_x
        y += self.button_height + self.padding_y
        self.w.checkbox_all_langs = CheckBox(
                    (x, y,
                    -self.padding_x,
                    self.text_height),
                    'all languages',
                    value=True,
                    sizeStyle=self.size_style,
                    callback=self.select_all_callback)
        # open window
        self.w.open()
    def get_languages(self):
        '''Store the currently selected language names in self.languages_selected.'''
        languages_i = self.w.languages.getSelection()
        languages = [ self.languages_order[i] for i in languages_i ]
        self.languages_selected = languages
    def select_all_callback(self, sender):
        '''Select or deselect every language in the list.'''
        # print dir(List)
        # NOTE(review): range(len(...)-1) appears to leave the last row
        # unselected -- confirm whether that is intentional.
        if sender.get():
            self.w.languages.setSelection( range( len(self.w.languages)-1 ) )
        else:
            self.w.languages.setSelection([])
    def build_glyphs(self):
        '''Collect (chars, glyph_names) for the selected languages and cases.'''
        # get parameters
        self.get_languages()
        lc = self.w.checkbox_lc.get()
        uc = self.w.checkbox_uc.get()
        chars = []
        glyph_names = []
        # collect characters and glyph names
        for lang in self.languages_selected:
            lc_chars, uc_chars = diacritics_chars[lang]
            lc_names, uc_names = diacritics_glyphnames[lang]
            if lc:
                chars += lc_chars.split()
                glyph_names += lc_names
            if uc:
                chars += uc_chars.split()
                glyph_names += uc_names
        # chars list (deduplicated, sorted)
        chars = list(set(chars))
        chars.sort()
        # names list (deduplicated, sorted)
        glyph_names = list(set(glyph_names))
        glyph_names.sort()
        # done
        return chars, glyph_names
    def print_callback(self, sender):
        '''Print selected languages, characters and glyph names to the output.'''
        chars, glyph_names = self.build_glyphs()
        # print info
        print 'accented characters for languages'
        print '=' * self.n
        print
        print 'languages:'
        print '-' * self.n
        print '%s\n' % ' '.join(self.languages_selected)
        # print characters
        print 'characters:'
        print '-' * self.n
        print '%s\n' % ' '.join(chars)
        # print glyph names
        print 'glyph names:'
        print '-' * self.n
        print '%s\n' % ' '.join(glyph_names)
    def make_callback(self, sender):
        '''Create any missing accented glyphs in the current font.'''
        f = CurrentFont()
        if f is not None:
            glyph_names = self.build_glyphs()[1]
            # compare with existing glyphs
            new_glyph_names = []
            for glyph_name in glyph_names:
                if not f.has_key(glyph_name):
                    new_glyph_names.append(glyph_name)
            # create new glyphs
            if len(new_glyph_names) > 0:
                print 'making glyphs...\n'
                print '\t',
                for glyph_name in new_glyph_names:
                    print glyph_name,
                    f.newGlyph(glyph_name)
                print
                print '\n...done.\n'
            # no new glyph to create
            else:
                print 'no new glyph to create.'
        # no font open
        else:
            print no_font_open
    def check_callback(self, sender):
        '''Check language coverage for the current font or the selection info.'''
        mode = self.w.check_mode.get()
        # get glyphs: mode 1 ("info") uses the selected languages,
        # mode 0 ("font") uses the glyphs in the current font
        if mode:
            glyph_names = self.build_glyphs()[1]
        else:
            f = CurrentFont()
            if f is not None:
                glyph_names = f.keys()
                glyph_names.sort()
            else:
                print no_font_open
                return
        # print info
        if mode:
            print 'language support in accented characters'
        else:
            print 'language support in current font'
        print '=' * self.n
        print
        check_languages_coverage(glyph_names, self.n)
# run!
diacriticsCoverageDialog()
| 31.014151
| 113
| 0.538251
|
f30a069e38274b47fa13eaa838aa5a36b5b97c5b
| 677
|
py
|
Python
|
src/ang/views.py
|
paradizer/car_shop
|
3c4eb0f234fab0301f5ff11063b8e267e0a76537
|
[
"MIT"
] | 53
|
2016-09-29T09:34:46.000Z
|
2022-02-03T05:13:10.000Z
|
src/ang/views.py
|
paradizer/car_shop
|
3c4eb0f234fab0301f5ff11063b8e267e0a76537
|
[
"MIT"
] | 9
|
2019-12-27T18:28:31.000Z
|
2022-03-12T00:09:48.000Z
|
src/ang/views.py
|
youngwarrior126/Django_Angular
|
1aba51e71db31528ddbe8f6c51193121b287e118
|
[
"MIT"
] | 41
|
2016-10-10T18:50:46.000Z
|
2022-03-10T15:56:38.000Z
|
import os
from django.conf import settings
from django.http import HttpResponse, Http404
from django.views.generic import View
from django.shortcuts import render
class AngularTemplateView(View):
    """Serve a pre-built Angular partial as a raw HTTP response.

    The ``item`` URL kwarg names the template file (without extension) under
    ``<first template dir>/ang/app/``.  Missing or unreadable templates yield
    a 404 instead of a server error.
    """
    def get(self, request, item=None, *args, **kwargs):
        """Return the contents of ``ang/app/<item>.html`` or raise Http404."""
        template_dir_path = settings.TEMPLATES[0]["DIRS"][0]
        final_path = os.path.join(template_dir_path, "ang", "app", item + ".html")
        try:
            # Read eagerly and close the handle -- the original leaked an
            # open file object into HttpResponse.
            with open(final_path) as html:
                return HttpResponse(html.read())
        except OSError:
            # Only file-access problems become 404s; the original bare
            # `except:` hid every other error (including TypeError on
            # item=None), which made failures undebuggable.
            raise Http404
# def get_angular_template(request, item=None):
# print(item)
# return render(request, "ang/app/blog-list.html", {})
| 27.08
| 83
| 0.658789
|
1b88f4a2ff4fb432f8ab95a45a17a32ded367dbf
| 5,133
|
py
|
Python
|
ibeis/algo/preproc/preproc_image.py
|
SU-ECE-17-7/ibeis
|
b12a45b06d8ce8f52585494c15f6776f5889ed67
|
[
"Apache-2.0"
] | null | null | null |
ibeis/algo/preproc/preproc_image.py
|
SU-ECE-17-7/ibeis
|
b12a45b06d8ce8f52585494c15f6776f5889ed67
|
[
"Apache-2.0"
] | null | null | null |
ibeis/algo/preproc/preproc_image.py
|
SU-ECE-17-7/ibeis
|
b12a45b06d8ce8f52585494c15f6776f5889ed67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import splitext, basename
import warnings # NOQA
import vtool.exif as vtexif
import utool as ut
#import numpy as np # NOQA
#import hashlib
#import uuid
(print, rrr, profile) = ut.inject2(__name__, '[preproc_img]', DEBUG=False)
#@profile
def parse_exif(pil_img):
    """Pull (unixtime, lat, lon, orientation) out of a PIL image's EXIF data.

    Thin wrapper over vtool.exif helpers: the EXIF dictionary is read once
    and the individual fields are extracted from it.
    """
    tags = vtexif.get_exif_dict(pil_img)
    # TODO: extract more tags (mainly the orientation tag family)
    lat, lon = vtexif.get_lat_lon(tags)
    orient = vtexif.get_orientation(tags)
    exiftime = vtexif.get_unixtime(tags)
    return exiftime, lat, lon, orient
def get_standard_ext(gpath):
    """Return the lower-cased file extension of *gpath*, normalizing
    '.jpeg' to '.jpg'."""
    ext = splitext(gpath)[1].lower()
    if ext == '.jpeg':
        return '.jpg'
    return ext
@profile
def parse_imageinfo(gpath):
    """ Worker function: gpath must be in UNIX-PATH format!
    Args:
        tup (tuple): a tuple or one argument
            (so the function can be parallelized easily)
            (here it is just gpath, no tuple, sorry for confusion)
    Returns:
        tuple: param_tup -
            if successful returns a tuple of image parameters which are values
            for SQL columns on else returns None
    CommandLine:
        python -m ibeis.algo.preproc.preproc_image --exec-parse_imageinfo
    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.algo.preproc.preproc_image import * # NOQA
        >>> gpath = ('/media/raid/work/lynx/_ibsdb/images/f6c84c6d-55ca-fd02-d0b4-1c7c9c27c894.jpg')
        >>> param_tup = parse_imageinfo(tup)
        >>> result = ('param_tup = %s' % (str(param_tup),))
        >>> print(result)
    """
    # Parse arguments from tuple
    #print('[ginfo] gpath=%r' % gpath)
    # Try to open the image
    from PIL import Image  # NOQA
    # Capture (rather than print) PIL warnings so they can be attributed to
    # the offending file below.
    with warnings.catch_warnings(record=True) as w:
        try:
            # Open image with Exif support
            pil_img = Image.open(gpath, 'r')  # NOQA
        except IOError as ex:
            # Unreadable image: signal failure to the caller with None
            print('[preproc] IOError: %s' % (str(ex),))
            return None
        if len(w) > 0:
            for warn in w:
                warnings.showwarning(warn.message, warn.category,
                                     warn.filename, warn.lineno, warn.file,
                                     warn.line)
                #warnstr = warnings.formatwarning
                #print(warnstr)
            print('Warnings issued by %r' % (gpath,))
    # Parse out the data
    width, height = pil_img.size         # Read width, height
    time, lat, lon, orient = parse_exif(pil_img)  # Read exif tags
    # EXIF orientations 6 and 8 are the 90-degree-rotated cases, so the
    # logical width/height are swapped relative to the stored pixels.
    if orient in [6, 8]:
        width, height = height, width
    # We cannot use pixel data as libjpeg is not determenistic (even for reads!)
    image_uuid = ut.get_file_uuid(gpath)  # Read file ]-hash-> guid = gid
    #orig_gpath = gpath
    orig_gname = basename(gpath)
    ext = get_standard_ext(gpath)
    notes = ''
    # Build parameters tuple
    # NOTE(review): gpath appears twice -- presumably (uri, abs-path) columns
    # in the images table; confirm against the SQL schema.
    param_tup = (
        image_uuid,
        gpath,
        gpath,
        orig_gname,
        #orig_gpath,
        ext,
        width,
        height,
        time,
        lat,
        lon,
        orient,
        notes
    )
    #print('[ginfo] %r %r' % (image_uuid, orig_gname))
    return param_tup
@profile
def add_images_params_gen(gpath_list, **kwargs):
    """
    generates values for add_images sqlcommands asychronously
    Args:
        gpath_list (list):
    Kwargs:
        ordered, force_serial, chunksize, prog, verbose, quiet, nTasks, freq,
        adjust
    Returns:
        generator: params_gen
    CommandLine:
        python -m ibeis.algo.preproc.preproc_image --exec-add_images_params_gen
    Example0:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.algo.preproc.preproc_image import * # NOQA
        >>> from vtool.tests import grabdata
        >>> gpath_list = grabdata.get_test_gpaths(ndata=3) + ['doesnotexist.jpg']
        >>> params_list = list(add_images_params_gen(gpath_list))
        >>> assert str(params_list[0][0]) == '66ec193a-1619-b3b6-216d-1784b4833b61', 'UUID gen method changed'
        >>> assert str(params_list[0][3]) == 'easy1.JPG', 'orig name is different'
        >>> assert params_list[3] is None
    """
    #preproc_args = [(gpath, kwargs) for gpath in gpath_list]
    #print('[about to parse]: gpath_list=%r' % (gpath_list,))
    # Delegates the (possibly parallel) mapping of parse_imageinfo over the
    # paths to utool; unreadable images come back as None (see doctest).
    params_gen = ut.generate(parse_imageinfo, gpath_list, adjust=True,
                             force_serial=True, **kwargs)
    return params_gen
def on_delete(ibs, featweight_rowid_list, qreq_=None):
    """Deletion hook; currently an unimplemented stub that only warns."""
    print('Warning: Not Implemented')
if __name__ == '__main__':
    """
    python -m ibeis.algo.preproc.preproc_image
    python -m ibeis.algo.preproc.preproc_image --allexamples
    """
    # freeze_support() is required for multiprocessing in frozen Windows builds
    import multiprocessing
    multiprocessing.freeze_support()
    # run the doctests embedded in this module's docstrings
    ut.doctest_funcs()
| 30.372781
| 110
| 0.607052
|
e1cc7e0977f123946d07d27f5675923aeb227139
| 6,781
|
py
|
Python
|
DecryptLogin/modules/core/douban.py
|
0honus0/DecryptLogin
|
8cc4527d37bee4ff0aeecbaf93e2d3910abf44f7
|
[
"MIT"
] | null | null | null |
DecryptLogin/modules/core/douban.py
|
0honus0/DecryptLogin
|
8cc4527d37bee4ff0aeecbaf93e2d3910abf44f7
|
[
"MIT"
] | null | null | null |
DecryptLogin/modules/core/douban.py
|
0honus0/DecryptLogin
|
8cc4527d37bee4ff0aeecbaf93e2d3910abf44f7
|
[
"MIT"
] | null | null | null |
'''
Function:
豆瓣模拟登录
Author:
Charles
微信公众号:
Charles的皮卡丘
更新日期:
2022-03-09
'''
import os
import re
import time
import requests
from ..utils import removeImage, saveImage, showImage
'''PC端登录豆瓣'''
class doubanPC():
    """Password login for douban.com using the desktop (PC) endpoints."""
    is_callable = True
    def __init__(self, **kwargs):
        # Copy arbitrary config (e.g. proxies) onto the instance
        for key, value in kwargs.items(): setattr(self, key, value)
        self.info = 'login in douban in pc mode'
        self.session = requests.Session()
        self.__initialize()
    '''登录函数'''
    # Login entry point: returns (info dict, authenticated session).
    def login(self, username, password, crack_captcha_func=None, **kwargs):
        # Install caller-supplied proxies on the session
        self.session.proxies.update(kwargs.get('proxies', {}))
        # Prime session cookies by fetching the home page
        response = self.session.get(self.home_url)
        # Submit the login form
        data = {
            'ck': '',
            'name': username,
            'password': password,
            'remember': 'true',
            'ticket': ''
        }
        response = self.session.post(self.login_url, data=data)
        response_json = response.json()
        # Login succeeded
        if response_json['status'] == 'success':
            print('[INFO]: Account -> %s, login successfully' % username)
            infos_return = {'username': username}
            infos_return.update(response_json)
            return infos_return, self.session
        # Wrong username or password
        elif response_json['status'] == 'failed' and response_json['message'] == 'unmatch_name_password':
            raise RuntimeError('Account -> %s, fail to login, username or password error' % username)
        # Any other failure: surface the server-provided description
        else:
            raise RuntimeError(response_json.get('description'))
    '''初始化'''
    # Set default browser-like headers and the endpoint URLs.
    def __initialize(self):
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',
            'Host': 'accounts.douban.com',
            'Origin': 'https://accounts.douban.com',
            'Referer': 'https://accounts.douban.com/passport/login_popup?login_source=anony'
        }
        self.home_url = 'https://www.douban.com/'
        self.login_url = 'https://accounts.douban.com/j/mobile/login/basic'
        self.session.headers.update(self.headers)
'''移动端登录豆瓣'''
class doubanMobile():
    """Placeholder for mobile-mode douban login; not implemented yet."""
    # douban.login() checks this flag and raises NotImplementedError when False.
    is_callable = False
    def __init__(self, **kwargs):
        for attr, val in kwargs.items():
            setattr(self, attr, val)
        self.info = 'login in douban in mobile mode'
'''扫码登录豆瓣'''
class doubanScanqr():
    """QR-code login for douban.com: shows a QR image and polls until scanned."""
    is_callable = True
    def __init__(self, **kwargs):
        # Copy arbitrary config (e.g. proxies) onto the instance
        for key, value in kwargs.items(): setattr(self, key, value)
        self.info = 'login in douban in scanqr mode'
        self.session = requests.Session()
        self.cur_path = os.getcwd()
        self.__initialize()
    '''登录函数'''
    # Login entry point: username/password are ignored in scan-QR mode.
    def login(self, username, password, crack_captcha_func=None, **kwargs):
        # Install caller-supplied proxies on the session
        self.session.proxies.update(kwargs.get('proxies', {}))
        # Request a QR code, save it locally and show it to the user
        data = {
            'ck': '',
            'ticket': 't03mZd7QmXsZo5ekor2XwtvV6ezR7hRDxYBnQwC3WIdK6uvfPq4iCOG-JFG-TkoTg6vWEueuKFIpJpP8_BJlG8XNlUUQCtoBmarY7ZS5DTTir1Z3i7pgpXsJQ**',
            'randstr': '@xGH',
            'tc_app_id': '2044348370',
        }
        response = self.session.post(self.qrcode_url, data=data)
        response_json = response.json()
        if response_json['status'] != 'success': raise RuntimeError(response_json)
        code, img_url = response_json['payload']['code'], response_json['payload']['img']
        headers = {'User-Agent': self.headers['User-Agent']}
        response = requests.get(img_url, headers=headers)
        saveImage(response.content, os.path.join(self.cur_path, 'qrcode.png'))
        showImage(os.path.join(self.cur_path, 'qrcode.png'))
        # Poll the scan status once a second until confirmed
        params = {
            'ck': '',
            'code': code,
        }
        while True:
            response = self.session.get(self.status_url, params=params)
            response_json = response.json()
            login_status = response_json['payload']['login_status']
            if login_status in ['pending', 'scan']:
                time.sleep(1)
                continue
            elif login_status in ['login']:
                break
            else:
                raise RuntimeError(response_json)
        # Logged in: remove the local QR image and fetch the profile page
        removeImage(os.path.join(self.cur_path, 'qrcode.png'))
        response = self.session.get(self.stat_url)
        response = self.session.get(self.home_url)
        username = re.findall(r'input name="nick" type="text" value="(.*?)"', response.text)[0]
        print('[INFO]: Account -> %s, login successfully' % username)
        infos_return = {'username': username, 'text': response.text}
        infos_return.update(response_json)
        return infos_return, self.session
    '''初始化'''
    # Set default browser-like headers and the endpoint URLs.
    def __initialize(self):
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',
            'Host': 'accounts.douban.com',
            'Origin': 'https://accounts.douban.com',
            'Referer': 'https://accounts.douban.com/passport/login_popup?login_source=anony'
        }
        self.qrcode_url = 'https://accounts.douban.com/j/mobile/login/qrlogin_code'
        self.status_url = 'https://accounts.douban.com/j/mobile/login/qrlogin_status'
        self.stat_url = 'https://www.douban.com/stat.html?&action=login_success&platform=qrcode&callback=jsonp_00czy4260w6yer2'
        self.home_url = 'https://www.douban.com/'
        self.session.headers.update(self.headers)
'''
Function:
豆瓣模拟登录
Detail:
-login:
Input:
--username: 用户名
--password: 密码
--mode: mobile/pc/scanqr
--crack_captcha_func: 若提供验证码接口, 则利用该接口来实现验证码的自动识别
--proxies: 为requests.Session()设置代理
Return:
--infos_return: 用户名等信息
--session: 登录后的requests.Session()
'''
class douban():
    """Facade that dispatches douban login to a mode-specific backend.

    Supported modes: 'pc', 'mobile', 'scanqr'; see the module docstring
    for the login() argument contract.
    """
    def __init__(self, **kwargs):
        self.info = 'login in douban'
        self.supported_modes = {
            'pc': doubanPC(**kwargs),
            'mobile': doubanMobile(**kwargs),
            'scanqr': doubanScanqr(**kwargs),
        }
    def login(self, username='', password='', mode='scanqr', crack_captcha_func=None, **kwargs):
        """Run the selected backend's login and return (infos, session)."""
        assert mode in self.supported_modes, 'unsupport mode %s in douban.login' % mode
        backend = self.supported_modes[mode]
        if not backend.is_callable:
            raise NotImplementedError('not be implemented for mode %s in douban.login' % mode)
        call_args = {
            'username': username,
            'password': password,
            'crack_captcha_func': crack_captcha_func,
        }
        call_args.update(kwargs)
        return backend.login(**call_args)
| 38.310734
| 148
| 0.598289
|
2be38d8e7ad1f8caa76b9a731a50b850857497a0
| 29,121
|
py
|
Python
|
lib/efficientnet.py
|
wonderit/aptos-retinopathy-detection
|
8402ce798fa0419627fb878788fa771849e34516
|
[
"MIT"
] | null | null | null |
lib/efficientnet.py
|
wonderit/aptos-retinopathy-detection
|
8402ce798fa0419627fb878788fa771849e34516
|
[
"MIT"
] | null | null | null |
lib/efficientnet.py
|
wonderit/aptos-retinopathy-detection
|
8402ce798fa0419627fb878788fa771849e34516
|
[
"MIT"
] | null | null | null |
# Code from https://github.com/abhuse/pytorch-efficientnet
from math import ceil
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch._six import container_abcs
from torch.utils import model_zoo
def _pair(x):
if isinstance(x, container_abcs.Iterable):
return x
return (x, x)
class SamePaddingConv2d(nn.Module):
    """Conv2d with TF-style 'SAME' padding for a fixed input spatial shape.

    The output spatial size is ceil(input / stride); the required padding is
    computed once at construction (hence the fixed ``in_spatial_shape``) and
    applied asymmetrically (extra pixel on bottom/right) via ZeroPad2d.
    """
    def __init__(self,
                 in_spatial_shape,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 dilation=1,
                 enforce_in_spatial_shape=False,
                 **kwargs):
        super(SamePaddingConv2d, self).__init__()
        self._in_spatial_shape = _pair(in_spatial_shape)
        # e.g. throw exception if input spatial shape does not match in_spatial_shape
        # when calling self.forward()
        self.enforce_in_spatial_shape = enforce_in_spatial_shape
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        dilation = _pair(dilation)
        in_height, in_width = self._in_spatial_shape
        filter_height, filter_width = kernel_size
        stride_heigth, stride_width = stride
        dilation_height, dilation_width = dilation
        # 'SAME' output size: ceil(in / stride) per dimension
        out_height = int(ceil(float(in_height) / float(stride_heigth)))
        out_width = int(ceil(float(in_width) / float(stride_width)))
        # total padding needed so the (dilated) filter covers the input exactly
        pad_along_height = max((out_height - 1) * stride_heigth +
                               filter_height + (filter_height - 1) * (dilation_height - 1) - in_height, 0)
        pad_along_width = max((out_width - 1) * stride_width +
                              filter_width + (filter_width - 1) * (dilation_width - 1) - in_width, 0)
        # odd totals put the extra pixel on the bottom/right
        pad_top = pad_along_height // 2
        pad_bottom = pad_along_height - pad_top
        pad_left = pad_along_width // 2
        pad_right = pad_along_width - pad_left
        paddings = (pad_left, pad_right, pad_top, pad_bottom)
        if any(p > 0 for p in paddings):
            self.zero_pad = nn.ZeroPad2d(paddings)
        else:
            self.zero_pad = None
        self.conv = nn.Conv2d(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              dilation=dilation,
                              **kwargs)
        self._out_spatial_shape = (out_height, out_width)
    @property
    def in_spatial_shape(self):
        # (height, width) this layer was constructed for
        return self._in_spatial_shape
    @property
    def out_spatial_shape(self):
        # (height, width) produced by forward()
        return self._out_spatial_shape
    @property
    def in_channels(self):
        return self.conv.in_channels
    @property
    def out_channels(self):
        return self.conv.out_channels
    def check_spatial_shape(self, x):
        """Raise ValueError if x's H/W differ from the constructed shape."""
        if x.size(2) != self.in_spatial_shape[0] or \
                x.size(3) != self.in_spatial_shape[1]:
            raise ValueError(
                "Expected input spatial shape {}, got {} instead".format(self.in_spatial_shape,
                                                                         x.shape[2:]))
    def forward(self, x):
        if self.enforce_in_spatial_shape:
            self.check_spatial_shape(x)
        if self.zero_pad is not None:
            x = self.zero_pad(x)
        x = self.conv(x)
        return x
class ConvBNAct(nn.Module):
    """Convolution -> BatchNorm2d -> optional activation.

    ``same_padding=True`` selects SamePaddingConv2d (fixed-input-shape
    'SAME' padding) instead of a plain nn.Conv2d; remaining kwargs are
    forwarded to whichever conv class is used.
    """
    def __init__(self,
                 out_channels,
                 activation=None,
                 bn_epsilon=None,
                 bn_momentum=None,
                 same_padding=False,
                 **kwargs):
        super(ConvBNAct, self).__init__()
        _conv_cls = SamePaddingConv2d if same_padding else nn.Conv2d
        self.conv = _conv_cls(out_channels=out_channels, **kwargs)
        # only override BatchNorm defaults when explicitly requested
        bn_kwargs = {}
        if bn_epsilon is not None:
            bn_kwargs["eps"] = bn_epsilon
        if bn_momentum is not None:
            bn_kwargs["momentum"] = bn_momentum
        self.bn = nn.BatchNorm2d(out_channels, **bn_kwargs)
        self.activation = activation
    @property
    def in_spatial_shape(self):
        # only meaningful for the same-padding variant; None otherwise
        if isinstance(self.conv, SamePaddingConv2d):
            return self.conv.in_spatial_shape
        else:
            return None
    @property
    def out_spatial_shape(self):
        # only meaningful for the same-padding variant; None otherwise
        if isinstance(self.conv, SamePaddingConv2d):
            return self.conv.out_spatial_shape
        else:
            return None
    @property
    def in_channels(self):
        return self.conv.in_channels
    @property
    def out_channels(self):
        return self.conv.out_channels
    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.activation is not None:
            x = self.activation(x)
        return x
class Swish(nn.Module):
    """Swish activation: ``f(x) = x * sigmoid(beta * x)``.

    With the default ``beta=1`` (not learnable) the cheaper fixed form
    ``x * sigmoid(x)`` is dispatched; otherwise ``beta`` is stored as a
    parameter, trainable when ``beta_learnable`` is True.
    """
    def __init__(self, beta=1.0, beta_learnable=False):
        super(Swish, self).__init__()
        use_simple = (beta == 1.0) and not beta_learnable
        if use_simple:
            # fast path: no beta parameter at all
            self._op = self.simple_swish
            return
        self.beta = nn.Parameter(torch.full([1], beta),
                                 requires_grad=beta_learnable)
        self._op = self.advanced_swish
    def simple_swish(self, x):
        """Swish with beta fixed at 1."""
        return x * torch.sigmoid(x)
    def advanced_swish(self, x):
        """Swish with an explicit (possibly learnable) beta."""
        return x * torch.sigmoid(self.beta * x)
    def forward(self, x):
        return self._op(x)
class DropConnect(nn.Module):
    """Stochastic depth: zero whole examples in the batch with probability
    ``rate`` during training, scaling survivors by ``1 / keep_prob``.

    In eval mode the input passes through unchanged.
    """
    def __init__(self, rate=0.5):
        super(DropConnect, self).__init__()
        self.keep_prob = None
        self.set_rate(rate)
    def set_rate(self, rate):
        """Validate ``rate`` (must be in [0, 1)) and cache keep_prob."""
        if not 0 <= rate < 1:
            raise ValueError("rate must be 0<=rate<1, got {} instead".format(rate))
        self.keep_prob = 1 - rate
    def forward(self, x):
        if not self.training:
            return x
        # one Bernoulli draw per example, broadcast over C/H/W
        noise = self.keep_prob + torch.rand([x.size(0), 1, 1, 1],
                                            dtype=x.dtype,
                                            device=x.device)
        mask = torch.floor(noise)
        return torch.mul(torch.div(x, self.keep_prob), mask)
class SqueezeExcitate(nn.Module):
    """Squeeze-and-Excitation gate: global-average-pool, bottleneck to
    ``se_size`` channels, restore, sigmoid, then scale the input
    channel-wise by the resulting gate."""
    def __init__(self, in_channels, se_size, activation=None):
        super(SqueezeExcitate, self).__init__()
        # 1x1 convs acting on the pooled (N, C, 1, 1) descriptor
        self.dim_reduce = nn.Conv2d(in_channels=in_channels,
                                    out_channels=se_size,
                                    kernel_size=1)
        self.dim_restore = nn.Conv2d(in_channels=se_size,
                                     out_channels=in_channels,
                                     kernel_size=1)
        self.activation = F.relu if activation is None else activation
    def forward(self, x):
        gate = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.dim_reduce(gate)
        gate = self.activation(gate)
        gate = self.dim_restore(gate)
        gate = torch.sigmoid(gate)
        return torch.mul(x, gate)
class MBConvBlock(nn.Module):
    """Mobile inverted-bottleneck convolution (MBConv) block:
    1x1 expansion -> depth-wise conv ('SAME' padding) -> optional
    squeeze-excitate -> 1x1 projection, with an identity skip (plus
    optional DropConnect) when shape and stride allow it."""
    def __init__(self,
                 in_spatial_shape,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 expansion_factor,
                 activation,
                 bn_epsilon=None,
                 bn_momentum=None,
                 se_size=None,
                 drop_connect_rate=None,
                 bias=False):
        """
        Initialize new MBConv block
        :param in_spatial_shape: image shape, e.g. tuple [height, width] or int size for [size, size]
        :param in_channels: number of input channels
        :param out_channels: number of output channels
        :param kernel_size: kernel size for depth-wise convolution
        :param stride: stride for depth-wise convolution
        :param expansion_factor: expansion factor
        :param bn_epsilon: batch normalization epsilon
        :param bn_momentum: batch normalization momentum
        :param se_size: number of features in reduction layer of Squeeze-and-Excitate layer
        :param activation: activation function
        :param drop_connect_rate: DropConnect rate
        :param bias: enable bias in convolution operations
        """
        super(MBConvBlock, self).__init__()
        # validate optional knobs up front
        if se_size is not None and se_size < 1:
            raise ValueError("se_size must be >=1, got {} instead".format(se_size))
        if drop_connect_rate is not None and not 0 <= drop_connect_rate < 1:
            raise ValueError("drop_connect_rate must be in range [0,1), got {} instead".format(drop_connect_rate))
        if not (isinstance(expansion_factor, int) and expansion_factor >= 1):
            raise ValueError("expansion factor must be int and >=1, got {} instead".format(expansion_factor))
        exp_channels = in_channels * expansion_factor
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        self.activation = activation
        # expansion convolution (skipped entirely when factor == 1)
        if expansion_factor != 1:
            self.expand_conv = ConvBNAct(in_channels=in_channels,
                                         out_channels=exp_channels,
                                         kernel_size=(1, 1),
                                         bias=bias,
                                         activation=self.activation,
                                         bn_epsilon=bn_epsilon,
                                         bn_momentum=bn_momentum)
        else:
            self.expand_conv = None
        # depth-wise convolution (groups == channels)
        self.dp_conv = ConvBNAct(in_spatial_shape=in_spatial_shape,
                                 in_channels=exp_channels,
                                 out_channels=exp_channels,
                                 kernel_size=kernel_size,
                                 stride=stride,
                                 groups=exp_channels,
                                 bias=bias,
                                 activation=self.activation,
                                 same_padding=True,
                                 bn_epsilon=bn_epsilon,
                                 bn_momentum=bn_momentum)
        if se_size is not None:
            self.se = SqueezeExcitate(exp_channels,
                                      se_size,
                                      activation=self.activation)
        else:
            self.se = None
        if drop_connect_rate is not None:
            self.drop_connect = DropConnect(drop_connect_rate)
        else:
            self.drop_connect = None
        # identity skip only when spatial size and channel count are preserved
        if in_channels == out_channels and all(s == 1 for s in stride):
            self.skip_enabled = True
        else:
            self.skip_enabled = False
        # projection convolution (no activation, per MBConv convention here)
        self.project_conv = ConvBNAct(in_channels=exp_channels,
                                      out_channels=out_channels,
                                      kernel_size=(1, 1),
                                      bias=bias,
                                      activation=None,
                                      bn_epsilon=bn_epsilon,
                                      bn_momentum=bn_momentum)
    @property
    def in_spatial_shape(self):
        return self.dp_conv.in_spatial_shape
    @property
    def out_spatial_shape(self):
        return self.dp_conv.out_spatial_shape
    @property
    def in_channels(self):
        if self.expand_conv is not None:
            return self.expand_conv.in_channels
        else:
            return self.dp_conv.in_channels
    @property
    def out_channels(self):
        return self.project_conv.out_channels
    def forward(self, x):
        inp = x
        if self.expand_conv is not None:
            # expansion convolution applied only if expansion ratio > 1
            x = self.expand_conv(x)
        # depth-wise convolution
        x = self.dp_conv(x)
        # squeeze-and-excitate
        if self.se is not None:
            x = self.se(x)
        # projection convolution
        x = self.project_conv(x)
        if self.skip_enabled:
            # drop-connect applied only if skip connection enabled
            if self.drop_connect is not None:
                x = self.drop_connect(x)
            x = x + inp
        return x
class EnetStage(nn.Module):
    """A stage of ``num_layers`` MBConv blocks.

    Only the first block applies the given stride and channel change; the
    remaining blocks use stride 1 with in_channels == out_channels, each
    taking its own drop-connect rate from ``drop_connect_rates``.
    """
    def __init__(self,
                 num_layers,
                 in_spatial_shape,
                 in_channels,
                 out_channels,
                 stride,
                 se_ratio,
                 drop_connect_rates,
                 **kwargs):
        super(EnetStage, self).__init__()
        if not (isinstance(num_layers, int) and num_layers >= 1):
            raise ValueError("num_layers must be int and >=1, got {} instead".format(num_layers))
        if not (isinstance(drop_connect_rates, container_abcs.Iterable) and
                len(drop_connect_rates) == num_layers):
            raise ValueError("drop_connect_rates must be iterable of "
                             "length num_layers ({}), got {} instead".format(num_layers, drop_connect_rates))
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        spatial_shape = in_spatial_shape
        for i in range(self.num_layers):
            # SE bottleneck width derived from the block's input channels
            se_size = max(1, in_channels // se_ratio)
            layer = MBConvBlock(in_spatial_shape=spatial_shape,
                                in_channels=in_channels,
                                out_channels=out_channels,
                                stride=stride,
                                se_size=se_size,
                                drop_connect_rate=drop_connect_rates[i],
                                **kwargs)
            self.layers.append(layer)
            spatial_shape = layer.out_spatial_shape
            # remaining MBConv blocks have stride 1 and in_channels=out_channels
            stride = 1
            in_channels = out_channels
    @property
    def in_spatial_shape(self):
        return self.layers[0].in_spatial_shape
    @property
    def out_spatial_shape(self):
        return self.layers[-1].out_spatial_shape
    @property
    def in_channels(self):
        return self.layers[0].in_channels
    @property
    def out_channels(self):
        return self.layers[-1].out_channels
    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x
def round_filters(filters, width_coefficient, depth_divisor=8):
    """Scale a channel count by ``width_coefficient`` and round it to the
    nearest multiple of ``depth_divisor``.

    The result is never below ``depth_divisor`` and never more than 10%
    below the scaled value (matching the reference EfficientNet rounding).
    """
    scaled = filters * width_coefficient
    rounded = int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor
    rounded = max(depth_divisor, rounded)
    # Rounding down must not lose more than 10% of the scaled width.
    if rounded < 0.9 * scaled:
        rounded += depth_divisor
    return int(rounded)
def round_repeats(repeats, depth_coefficient):
    """Scale a block-repeat count by ``depth_coefficient``, rounding up."""
    return int(ceil(repeats * depth_coefficient))
class EfficientNet(nn.Module):
    """EfficientNet image classifier (B0-B7).

    Architecture: stem conv -> seven MBConv stages -> 1x1 head conv ->
    global average pooling -> dropout -> linear classifier.  The model
    index ``b`` selects the compound width/depth/resolution scaling.
    """
    # (width_coefficient, depth_coefficient, dropout_rate, in_spatial_shape)
    coefficients = [
        (1.0, 1.0, 0.2, 224),
        (1.0, 1.1, 0.2, 240),
        (1.1, 1.2, 0.3, 260),
        (1.2, 1.4, 0.3, 300),
        (1.4, 1.8, 0.4, 380),
        (1.6, 2.2, 0.4, 456),
        (1.8, 2.6, 0.5, 528),
        (2.0, 3.1, 0.5, 600),
    ]
    # block_repeat, kernel_size, stride, expansion_factor, input_channels, output_channels, se_ratio
    stage_args = [
        [1, 3, 1, 1, 32, 16, 4],
        [2, 3, 2, 6, 16, 24, 4],
        [2, 5, 2, 6, 24, 40, 4],
        [3, 3, 2, 6, 40, 80, 4],
        [3, 5, 1, 6, 80, 112, 4],
        [4, 5, 2, 6, 112, 192, 4],
        [1, 3, 1, 6, 192, 320, 4],
    ]
    # Pre-trained ImageNet weight files, indexed by model index b.
    state_dict_urls = [
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b0-d86f8792.pth",
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b1-82896633.pth",
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b2-e4b93854.pth",
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b3-3b9ca610.pth",
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b4-24436ca5.pth",
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b5-d8e577e8.pth",
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b6-f20845c7.pth",
        "https://storage.googleapis.com/abhuse/pretrained_models/efficientnet/efficientnet-b7-86e8e374.pth",
    ]
    def __init__(self,
                 b,
                 in_channels=3,
                 n_classes=1000,
                 in_spatial_shape=None,
                 activation=Swish(),
                 bias=False,
                 drop_connect_rate=0.2,
                 dropout_rate=None,
                 bn_epsilon=1e-3,
                 bn_momentum=0.01,
                 pretrained=False,
                 progress=False):
        """
        Initialize new EfficientNet model
        :param b: model index, i.e. 0 for EfficientNet-B0
        :param in_channels: number of input channels
        :param n_classes: number of output classes
        :param in_spatial_shape: input image shape
        :param activation: activation function
        :param bias: enable bias in convolution operations
        :param drop_connect_rate: DropConnect rate
        :param dropout_rate: dropout rate, this will override default rate for each model
        :param bn_epsilon: batch normalization epsilon
        :param bn_momentum: batch normalization momentum
        :param pretrained: initialize model with weights pre-trained on ImageNet
        :param progress: show progress when downloading pre-trained weights
        """
        super(EfficientNet, self).__init__()
        # verify all parameters
        EfficientNet.check_init_params(b,
                                       in_channels,
                                       n_classes,
                                       in_spatial_shape,
                                       activation,
                                       bias,
                                       drop_connect_rate,
                                       dropout_rate,
                                       bn_epsilon,
                                       bn_momentum,
                                       pretrained,
                                       progress)
        self.b = b
        self.in_channels = in_channels
        self.activation = activation
        self.drop_connect_rate = drop_connect_rate
        self._override_dropout_rate = dropout_rate
        # Default input resolution for this model index, used unless the
        # caller supplies an explicit in_spatial_shape.
        width_coefficient, _, _, spatial_shape = EfficientNet.coefficients[self.b]
        if in_spatial_shape is not None:
            self.in_spatial_shape = _pair(in_spatial_shape)
        else:
            self.in_spatial_shape = _pair(spatial_shape)
        # initial convolution (the "stem"): 3x3 stride-2, width scaled from 32
        init_conv_out_channels = round_filters(32, width_coefficient)
        self.init_conv = ConvBNAct(in_spatial_shape=self.in_spatial_shape,
                                   in_channels=self.in_channels,
                                   out_channels=init_conv_out_channels,
                                   kernel_size=(3, 3),
                                   stride=(2, 2),
                                   bias=bias,
                                   activation=self.activation,
                                   same_padding=True,
                                   bn_epsilon=bn_epsilon,
                                   bn_momentum=bn_momentum)
        spatial_shape = self.init_conv.out_spatial_shape
        self.stages = nn.ModuleList()
        # Running index of MBConv blocks across stages, used to slice the
        # per-block drop-connect schedule.
        mbconv_idx = 0
        dc_rates = self.get_dc_rates()
        for stage_id in range(self.num_stages):
            kernel_size = self.get_stage_kernel_size(stage_id)
            stride = self.get_stage_stride(stage_id)
            expansion_factor = self.get_stage_expansion_factor(stage_id)
            stage_in_channels = self.get_stage_in_channels(stage_id)
            stage_out_channels = self.get_stage_out_channels(stage_id)
            stage_num_layers = self.get_stage_num_layers(stage_id)
            stage_dc_rates = dc_rates[mbconv_idx:mbconv_idx + stage_num_layers]
            stage_se_ratio = self.get_stage_se_ratio(stage_id)
            stage = EnetStage(num_layers=stage_num_layers,
                              in_spatial_shape=spatial_shape,
                              in_channels=stage_in_channels,
                              out_channels=stage_out_channels,
                              stride=stride,
                              se_ratio=stage_se_ratio,
                              drop_connect_rates=stage_dc_rates,
                              kernel_size=kernel_size,
                              expansion_factor=expansion_factor,
                              activation=self.activation,
                              bn_epsilon=bn_epsilon,
                              bn_momentum=bn_momentum,
                              bias=bias
                              )
            self.stages.append(stage)
            spatial_shape = stage.out_spatial_shape
            mbconv_idx += stage_num_layers
        # Head: 1x1 conv (width scaled from 1280) before pooling/classifier.
        head_conv_out_channels = round_filters(1280, width_coefficient)
        head_conv_in_channels = self.stages[-1].layers[-1].project_conv.out_channels
        self.head_conv = ConvBNAct(in_channels=head_conv_in_channels,
                                   out_channels=head_conv_out_channels,
                                   kernel_size=(1, 1),
                                   bias=bias,
                                   activation=self.activation,
                                   bn_epsilon=bn_epsilon,
                                   bn_momentum=bn_momentum)
        if self.dropout_rate > 0:
            self.dropout = nn.Dropout(p=self.dropout_rate)
        else:
            self.dropout = None
        self.avpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(head_conv_out_channels, n_classes)
        if pretrained:
            self._load_state(self.b, in_channels, n_classes, progress)
    @property
    def num_stages(self):
        """Number of MBConv stages (7 for every model size)."""
        return len(EfficientNet.stage_args)
    @property
    def width_coefficient(self):
        """Channel-width multiplier for this model index."""
        return EfficientNet.coefficients[self.b][0]
    @property
    def depth_coefficient(self):
        """Layer-repeat multiplier for this model index."""
        return EfficientNet.coefficients[self.b][1]
    @property
    def dropout_rate(self):
        """Classifier dropout rate: the constructor override if given,
        otherwise the default for this model index."""
        if self._override_dropout_rate is None:
            return EfficientNet.coefficients[self.b][2]
        else:
            return self._override_dropout_rate
    def get_stage_kernel_size(self, stage):
        """Depthwise kernel size for *stage*."""
        return EfficientNet.stage_args[stage][1]
    def get_stage_stride(self, stage):
        """Stride of the first block of *stage*."""
        return EfficientNet.stage_args[stage][2]
    def get_stage_expansion_factor(self, stage):
        """MBConv expansion factor for *stage*."""
        return EfficientNet.stage_args[stage][3]
    def get_stage_in_channels(self, stage):
        """Width-scaled input channel count for *stage*."""
        width_coefficient = self.width_coefficient
        in_channels = EfficientNet.stage_args[stage][4]
        return round_filters(in_channels, width_coefficient)
    def get_stage_out_channels(self, stage):
        """Width-scaled output channel count for *stage*."""
        width_coefficient = self.width_coefficient
        out_channels = EfficientNet.stage_args[stage][5]
        return round_filters(out_channels, width_coefficient)
    def get_stage_se_ratio(self, stage):
        """Squeeze-excite reduction ratio for *stage*."""
        return EfficientNet.stage_args[stage][6]
    def get_stage_num_layers(self, stage):
        """Depth-scaled number of MBConv blocks in *stage*."""
        depth_coefficient = self.depth_coefficient
        num_layers = EfficientNet.stage_args[stage][0]
        return round_repeats(num_layers, depth_coefficient)
    def get_num_mbconv_layers(self):
        """Total number of MBConv blocks across all stages."""
        total = 0
        for i in range(self.num_stages):
            total += self.get_stage_num_layers(i)
        return total
    def get_dc_rates(self):
        """Per-block drop-connect rates, increasing linearly with block
        index: block i of N gets drop_connect_rate * i / N (first block 0)."""
        total_mbconv_layers = self.get_num_mbconv_layers()
        return [self.drop_connect_rate * i / total_mbconv_layers
                for i in range(total_mbconv_layers)]
    def _load_state(self, b, in_channels, n_classes, progress):
        """Download and load pre-trained ImageNet weights for model *b*.

        Stem-conv weights are dropped when in_channels != 3, and classifier
        weights are dropped when n_classes != 1000; in either case the
        state dict is loaded non-strictly so those layers keep their
        random initialization.
        """
        state_dict = model_zoo.load_url(EfficientNet.state_dict_urls[b], progress=progress)
        strict = True
        if in_channels != 3:
            state_dict.pop('init_conv.conv.conv.weight')
            strict = False
        if n_classes != 1000:
            state_dict.pop('fc.weight')
            state_dict.pop('fc.bias')
            strict = False
        self.load_state_dict(state_dict, strict=strict)
        print("Model weights loaded successfully.")
    def check_input(self, x):
        """Validate that x is a 4-D (NCHW) tensor with the configured
        channel count; raises ValueError otherwise."""
        if x.dim() != 4:
            raise ValueError("Input x must be 4 dimensional tensor, got {} instead".format(x.dim()))
        if x.size(1) != self.in_channels:
            raise ValueError("Input must have {} channels, got {} instead".format(self.in_channels,
                                                                                  x.size(1)))
    @staticmethod
    def check_init_params(b,
                          in_channels,
                          n_classes,
                          in_spatial_shape,
                          activation,
                          bias,
                          drop_connect_rate,
                          override_dropout_rate,
                          bn_epsilon,
                          bn_momentum,
                          pretrained,
                          progress):
        """Validate every constructor argument, raising ValueError on the
        first invalid one."""
        if not isinstance(b, int):
            raise ValueError("b must be int, got {} instead".format(type(b)))
        elif not 0 <= b < len(EfficientNet.coefficients):
            raise ValueError("b must be in range 0<=b<=7, got {} instead".format(b))
        if not isinstance(in_channels, int):
            raise ValueError("in_channels must be int, got {} instead".format(type(in_channels)))
        elif not in_channels > 0:
            raise ValueError("in_channels must be > 0, got {} instead".format(in_channels))
        if not isinstance(n_classes, int):
            raise ValueError("n_classes must be int, got {} instead".format(type(n_classes)))
        elif not n_classes > 0:
            raise ValueError("n_classes must be > 0, got {} instead".format(n_classes))
        if not (in_spatial_shape is None or
                isinstance(in_spatial_shape, int) or
                (isinstance(in_spatial_shape, container_abcs.Iterable) and
                 len(in_spatial_shape) == 2 and
                 all(isinstance(s, int) for s in in_spatial_shape))):
            raise ValueError("in_spatial_shape must be either None, int or iterable of ints of length 2"
                             ", got {} instead".format(in_spatial_shape))
        if activation is not None and not callable(activation):
            raise ValueError("activation must be callable but is not")
        if not isinstance(bias, bool):
            raise ValueError("bias must be bool, got {} instead".format(type(bias)))
        if not isinstance(drop_connect_rate, float):
            raise ValueError("drop_connect_rate must be float, got {} instead".format(type(drop_connect_rate)))
        elif not 0 <= drop_connect_rate < 1.0:
            raise ValueError("drop_connect_rate must be within range 0 <= drop_connect_rate < 1.0, "
                             "got {} instead".format(drop_connect_rate))
        if override_dropout_rate is not None:
            if not isinstance(override_dropout_rate, float):
                raise ValueError("dropout_rate must be either None or float, "
                                 "got {} instead".format(type(override_dropout_rate)))
            elif not 0 <= override_dropout_rate < 1.0:
                raise ValueError("dropout_rate must be within range 0 <= dropout_rate < 1.0, "
                                 "got {} instead".format(override_dropout_rate))
        # NOTE(review): the two checks below report the value rather than its
        # type, unlike the other isinstance checks above.
        if not isinstance(bn_epsilon, float):
            raise ValueError("bn_epsilon must be float, got {} instead".format(bn_epsilon))
        if not isinstance(bn_momentum, float):
            raise ValueError("bn_momentum must be float, got {} instead".format(bn_momentum))
        if not isinstance(pretrained, bool):
            raise ValueError("pretrained must be bool, got {} instead".format(type(pretrained)))
        if not isinstance(progress, bool):
            raise ValueError("progress must be bool, got {} instead".format(type(progress)))
    def get_features(self, x):
        """Return the list of feature maps produced by each stage for x."""
        self.check_input(x)
        x = self.init_conv(x)
        out = []
        for stage in self.stages:
            x = stage(x)
            out.append(x)
        return out
    def forward(self, x):
        """Return class logits for the input batch x."""
        # Only the last stage's features feed the classifier head.
        x = self.get_features(x)[-1]
        x = self.head_conv(x)
        x = self.avpool(x)
        x = torch.flatten(x, 1)
        # Dropout is applied to the pooled feature vector, before the
        # final linear layer.
        if self.dropout is not None:
            x = self.dropout(x)
        x = self.fc(x)
        return x
| 37.770428
| 114
| 0.56406
|
88ce58f1a51a1b04b8c64c6df613e2f6de774375
| 876
|
py
|
Python
|
medium/8. String to Integer (atoi).py
|
junyinglucn/leetcode
|
1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7
|
[
"MIT"
] | null | null | null |
medium/8. String to Integer (atoi).py
|
junyinglucn/leetcode
|
1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7
|
[
"MIT"
] | null | null | null |
medium/8. String to Integer (atoi).py
|
junyinglucn/leetcode
|
1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7
|
[
"MIT"
] | null | null | null |
class Solution:
    def myAtoi(self, str: str) -> int:
        """Parse a leading integer from *str* (LeetCode 8, atoi) and clamp
        the result to the signed 32-bit range [-2**31, 2**31 - 1]."""
        s = str.strip()
        if not s:
            return 0
        # Optional sign; any other non-digit first character aborts parsing.
        sign = -1 if s[0] == '-' else 1
        if s[0] in '+-':
            s = s[1:]
        elif not s[0].isdigit():
            return 0
        # Consume the run of digits following the (optional) sign.
        digits = ''
        for ch in s:
            if not ch.isdigit():
                break
            digits += ch
        if not digits:
            return 0
        value = sign * int(digits)
        # Clamp to the 32-bit signed integer range.
        return max(-2 ** 31, min(2 ** 31 - 1, value))
| 29.2
| 69
| 0.336758
|
af53f880d59462e260170c02a027ce18552e58b5
| 9,826
|
py
|
Python
|
1d/shelf.py
|
RunxinNi/multilayer-examples
|
53c8525969193130005ff08e2a6fd25e842d9a99
|
[
"MIT"
] | 1
|
2021-05-23T04:25:48.000Z
|
2021-05-23T04:25:48.000Z
|
1d/shelf.py
|
RunxinNi/multilayer-examples
|
53c8525969193130005ff08e2a6fd25e842d9a99
|
[
"MIT"
] | null | null | null |
1d/shelf.py
|
RunxinNi/multilayer-examples
|
53c8525969193130005ff08e2a6fd25e842d9a99
|
[
"MIT"
] | 4
|
2017-06-21T13:33:44.000Z
|
2020-02-25T18:34:51.000Z
|
#!/usr/bin/env python
# encoding: utf-8
r"""Runs idealized jump and sloped 1d shelf tests"""
import sys
from clawpack.riemann import layered_shallow_water_1D
import clawpack.clawutil.runclaw as runclaw
from clawpack.pyclaw.plot import plot
import multilayer as ml
def jump_shelf(num_cells,eigen_method,**kargs):
    r"""Shelf test over a discontinuous (jump) bathymetry.

    Two-layer shallow water shelf problem: the bathymetry jumps from
    -4000 m to -100 m at x = -30 km and the internal surface is given the
    Acta Numerica initial perturbation.

    :param num_cells: number of grid cells in x
    :param eigen_method: eigenspace method flag forwarded to the Fortran
        Riemann solver
    :param kargs: forwarded to output-path creation and plotting; also read
        for 'use_petsc', 'solver_type', 'iplot' and 'htmlplot'
    """
    # Construct output and plot directory paths
    prefix = 'ml_e%s_n%s' % (eigen_method,num_cells)
    name = 'multilayer/jump_shelf'
    outdir,plotdir,log_path = runclaw.create_output_paths(name,prefix,**kargs)
    # Redirect loggers
    # This is not working for all cases, see comments in runclaw.py
    for logger_name in ['pyclaw.io', 'pyclaw.solution', 'plot', 'pyclaw.solver',
                        'f2py','data']:
        runclaw.replace_stream_handlers(logger_name,log_path,log_file_append=False)
    # Load in appropriate PyClaw version
    if kargs.get('use_petsc',False):
        import clawpack.petclaw as pyclaw
    else:
        import clawpack.pyclaw as pyclaw
    # =================
    # = Create Solver =
    # =================
    if kargs.get('solver_type','classic') == 'classic':
        solver = pyclaw.ClawSolver1D(riemann_solver=layered_shallow_water_1D)
    else:
        raise NotImplementedError('Classic is currently the only supported solver.')
    # Solver method parameters
    # NOTE(review): unlike sloped_shelf, num_waves is not set here -- confirm
    # the solver default matches the 4-wave multilayer Riemann solver.
    solver.cfl_desired = 0.9
    solver.cfl_max = 1.0
    solver.max_steps = 5000
    solver.fwave = True
    solver.kernel_language = 'Fortran'
    solver.limiters = 3
    solver.source_split = 1
    # Boundary conditions
    # Use wall boundary condition at beach
    solver.bc_lower[0] = 1
    solver.bc_upper[0] = 0
    solver.user_bc_upper = ml.bc.wall_qbc_upper
    solver.aux_bc_lower[0] = 1
    solver.aux_bc_upper[0] = 1
    # Set the before step function
    solver.before_step = lambda solver,solution:ml.step.before_step(solver,
                                                                    solution)
    # Use simple friction source term
    solver.step_source = ml.step.friction_source
    # ============================
    # = Create Initial Condition =
    # ============================
    num_layers = 2
    x = pyclaw.Dimension(-400e3, 0.0, num_cells, 'x')
    domain = pyclaw.Domain([x])
    # 2 equations per layer (depth, momentum); 3 + num_layers aux fields.
    state = pyclaw.State(domain, 2 * num_layers, 3 + num_layers)
    state.aux[ml.aux.kappa_index,:] = 0.0
    # Set physics data
    state.problem_data['g'] = 9.8
    state.problem_data['manning'] = 0.0
    state.problem_data['rho_air'] = 1.15
    state.problem_data['rho'] = [1025.0,1045.0]
    state.problem_data['r'] = state.problem_data['rho'][0] / state.problem_data['rho'][1]
    state.problem_data['one_minus_r'] = 1.0 - state.problem_data['r']
    state.problem_data['num_layers'] = num_layers
    # Set method parameters, this ensures it gets to the Fortran routines
    state.problem_data['eigen_method'] = eigen_method
    state.problem_data['dry_tolerance'] = 1e-3
    state.problem_data['inundation_method'] = 2
    state.problem_data['entropy_fix'] = False
    solution = pyclaw.Solution(state,domain)
    solution.t = 0.0
    # Set aux arrays including bathymetry, wind field and linearized depths
    ml.aux.set_jump_bathymetry(solution.state, -30e3, [-4000.0, -100.0])
    ml.aux.set_no_wind(solution.state)
    ml.aux.set_h_hat(solution.state, 0.5, [0.0, -300.0], [0.0, -300.0])
    # Set perturbation to sea at rest
    ml.qinit.set_acta_numerica_init_condition(solution.state, 0.4)
    # ================================
    # = Create simulation controller =
    # ================================
    controller = pyclaw.Controller()
    controller.solution = solution
    controller.solver = solver
    # Output parameters
    controller.output_style = 1
    controller.tfinal = 7200.0
    controller.num_output_times = 300
    # controller.output_style = 2
    # controller.out_times = [0.0,720.0,2400.0,4800.0,7200.0]
    controller.write_aux_init = True
    controller.outdir = outdir
    controller.write_aux = True
    # ==================
    # = Run Simulation =
    # ==================
    state = controller.run()
    # ============
    # = Plotting =
    # ============
    plot_kargs = {"eta":[0.0,-300.0],
                  "rho":solution.state.problem_data['rho'],
                  "g":solution.state.problem_data['g'],
                  "dry_tolerance":solution.state.problem_data['dry_tolerance'],
                  "bathy_ref_lines":[-30e3]}
    plot(setplot="./setplot_shelf.py", outdir=outdir, plotdir=plotdir,
         htmlplot=kargs.get('htmlplot', False), iplot=kargs.get('iplot', False),
         file_format=controller.output_format, **plot_kargs)
def sloped_shelf(num_cells,eigen_method,**kargs):
    r"""Shelf test over a linearly sloped bathymetry.

    Same two-layer setup as jump_shelf but the bottom ramps linearly from
    -4000 m at x0 = -130 km up to -100 m at x1 = -30 km.

    :param num_cells: number of grid cells in x
    :param eigen_method: eigenspace method flag forwarded to the Fortran
        Riemann solver
    :param kargs: forwarded to output-path creation and plotting; also read
        for 'use_petsc', 'solver_type', 'iplot' and 'htmlplot'
    """
    # Construct output and plot directory paths
    prefix = 'ml_e%s_n%s' % (eigen_method, num_cells)
    name = 'multilayer/sloped_shelf'
    outdir,plotdir,log_path = runclaw.create_output_paths(name, prefix, **kargs)
    # Redirect loggers
    # This is not working for all cases, see comments in runclaw.py
    for logger_name in ['io', 'solution', 'plot', 'evolve', 'f2py', 'data']:
        runclaw.replace_stream_handlers(logger_name,log_path,log_file_append=False)
    # Load in appropriate PyClaw version
    if kargs.get('use_petsc',False):
        import clawpack.petclaw as pyclaw
    else:
        import clawpack.pyclaw as pyclaw
    # =================
    # = Create Solver =
    # =================
    if kargs.get('solver_type', 'classic') == 'classic':
        solver = pyclaw.ClawSolver1D(riemann_solver=layered_shallow_water_1D)
    else:
        raise NotImplementedError('Classic is currently the only supported solver.')
    # Solver method parameters
    solver.cfl_desired = 0.9
    solver.cfl_max = 1.0
    solver.max_steps = 5000
    solver.fwave = True
    solver.kernel_language = 'Fortran'
    solver.num_waves = 4
    solver.limiters = 3
    solver.source_split = 1
    # Boundary conditions
    # Use wall boundary condition at beach
    solver.bc_lower[0] = 1
    solver.bc_upper[0] = 0
    solver.user_bc_upper = ml.bc.wall_qbc_upper
    solver.aux_bc_lower[0] = 1
    solver.aux_bc_upper[0] = 1
    # Set the Riemann solver
    # NOTE(review): the Riemann solver was already passed to the ClawSolver1D
    # constructor above -- presumably this assignment is redundant; confirm.
    solver.rp = layered_shallow_water_1D
    # Set the before step function
    solver.before_step = lambda solver, solution:ml.step.before_step(solver,
                                                                     solution)
    # Use simple friction source term
    solver.step_source = ml.step.friction_source
    # ============================
    # = Create Initial Condition =
    # ============================
    num_layers = 2
    x = pyclaw.Dimension(-400e3, 0.0, num_cells, 'x')
    domain = pyclaw.Domain([x])
    # 2 equations per layer (depth, momentum); 3 + num_layers aux fields.
    state = pyclaw.State(domain, 2 * num_layers, 3 + num_layers)
    state.aux[ml.aux.kappa_index,:] = 0.0
    # Set physics data
    state.problem_data['g'] = 9.8
    state.problem_data['manning'] = 0.0
    state.problem_data['rho_air'] = 1.15
    state.problem_data['rho'] = [1025.0, 1045.0]
    state.problem_data['r'] = state.problem_data['rho'][0] / state.problem_data['rho'][1]
    state.problem_data['one_minus_r'] = 1.0 - state.problem_data['r']
    state.problem_data['num_layers'] = num_layers
    # Set method parameters, this ensures it gets to the Fortran routines
    state.problem_data['eigen_method'] = eigen_method
    state.problem_data['dry_tolerance'] = 1e-3
    state.problem_data['inundation_method'] = 2
    state.problem_data['entropy_fix'] = False
    solution = pyclaw.Solution(state,domain)
    solution.t = 0.0
    # Set aux arrays including bathymetry, wind field and linearized depths
    x0 = -130e3
    x1 = -30e3
    ml.aux.set_sloped_shelf_bathymetry(solution.state, x0, x1, -4000.0, -100.0)
    ml.aux.set_no_wind(solution.state)
    ml.aux.set_h_hat(solution.state, 0.5, [0.0, -300.0], [0.0, -300.0])
    # Set perturbation to sea at rest
    ml.qinit.set_acta_numerica_init_condition(solution.state, 0.4)
    # ================================
    # = Create simulation controller =
    # ================================
    controller = pyclaw.Controller()
    controller.solution = solution
    controller.solver = solver
    # Output parameters
    controller.output_style = 1
    controller.tfinal = 7200.0
    controller.num_output_times = 300
    controller.write_aux_init = True
    controller.outdir = outdir
    controller.write_aux = True
    # ==================
    # = Run Simulation =
    # ==================
    state = controller.run()
    # ============
    # = Plotting =
    # ============
    plot_kargs = {"eta":[0.0,-300.0],
                  "rho":solution.state.problem_data['rho'],
                  "g":solution.state.problem_data['g'],
                  "dry_tolerance":solution.state.problem_data['dry_tolerance'],
                  "bathy_ref_lines":[x0,x1]}
    plot(setplot="./setplot_shelf.py",outdir=outdir,plotdir=plotdir,
         htmlplot=kargs.get('htmlplot',False),iplot=kargs.get('iplot',False),
         file_format=controller.output_format,**plot_kargs)
if __name__ == "__main__":
    # Eigen methods may be listed on the command line; default to method 2.
    if len(sys.argv) > 1:
        eig_methods = [int(arg) for arg in sys.argv[1:]]
    else:
        eig_methods = [2]
    # Run the jump-bathymetry tests, then the sloped-bathymetry tests.
    for method in eig_methods:
        jump_shelf(2000, method, iplot=False, htmlplot=True)
    for method in eig_methods:
        sloped_shelf(2000, method, iplot=False, htmlplot=True)
| 34.720848
| 89
| 0.609709
|
d711e607d3c17253e02602c09c80a84ca2b373b6
| 4,282
|
py
|
Python
|
utils.py
|
shredderzwj/isoline
|
46056251192d58019fa820c663c8affc60d58882
|
[
"Apache-2.0"
] | 2
|
2020-02-07T04:56:33.000Z
|
2020-08-19T10:49:58.000Z
|
utils.py
|
shredderzwj/isoline
|
46056251192d58019fa820c663c8affc60d58882
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
shredderzwj/isoline
|
46056251192d58019fa820c663c8affc60d58882
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import numpy as np
import random
import os
import time
import grid
def mask_grid_data(mgrid, grid_data, ax, ay):
    """
    Mask grid data to the interior of a closed polygon: every ``grid_data``
    value outside the region is set to np.nan so it is hidden when plotted.

    :param mgrid: np.mgrid type -> the full coordinate grid
    :param grid_data: scipy.interpolate.griddata type -> data on the full grid
    :param ax: list, tuple or array etc. -> x coordinates of the closed polygon
    :param ay: list, tuple or array etc. -> y coordinates of the closed polygon
        (assumed closed: last vertex repeats the first)
    :return: list -> index=0, np.mgrid type masked grid; index=1,
        scipy.interpolate.griddata type masked grid data
    """
    # First shrink the grid to the bounding rectangle of the polygon.
    max_x = np.max(ax)
    min_x = np.min(ax)
    max_y = np.max(ay)
    min_y = np.min(ay)
    mask = ((mgrid[0] > max_x) | (mgrid[0] < min_x)) | ((mgrid[1] > max_y) | (mgrid[1] < min_y))
    # Find the number of y (lat) samples per grid column inside the rectangle.
    for row in range(len(mask)):
        mgrid_convert_lat = mgrid[1][row][~mask[row]]
        lat_num = len(mgrid_convert_lat)
        if lat_num:
            break
    all_num = len(grid_data[~mask])
    lon_num = int(all_num / lat_num)
    # Reshape the surviving points back into a (lon_num, lat_num) grid.
    grid_data_convert = grid_data[~mask].reshape(lon_num, lat_num)
    mgrid_convert_lon = mgrid[0][~mask].reshape(lon_num, lat_num)
    mgrid_convert_lat = mgrid[1][~mask].reshape(lon_num, lat_num)
    mgrid_convert = np.array([mgrid_convert_lon, mgrid_convert_lat])
    # Scan line by line to collect the grid points outside the polygon that
    # must be masked out of the plot.
    mask_shape = []
    for col in range(len(mgrid_convert[1][0])):
        # Scan-line equation parameter: the line is y = -c.
        c = - mgrid_convert[1][0][col]
        # Find all intersections jds of the scan line Li with the polygon.
        # Walking from the far left of the plot area, each intersection
        # passed toggles the inside/outside state of the points after it.
        # Vertices that lie on Li in a V (or inverted-V) shape do not
        # toggle the state and are therefore ignored.
        jds = []
        for i in range(len(ax) - 1):
            tmpf1 = ay[i] + c
            tmpf2 = ay[i + 1] + c
            ji = tmpf1 * tmpf2
            if ji < 0:
                y = -c
                x = (y - ay[i])*(ax[i + 1] - ax[i]) / (ay[i + 1] - ay[i]) + ax[i]
                jds.append([x, y])
            elif ji == 0:
                # Vertex on the scan line: count it only if the two
                # neighbouring vertices are on opposite sides (not a V).
                if (ay[i - 1] + c) * (ay[i + 1] + c) < 0:
                    y = -c
                    x = (y - ay[i]) * (ax[i + 1] - ax[i]) / (ay[i + 1] - ay[i]) + ax[i]
                    jds.append([x, y])
        if jds:
            jds.sort()
            mask_tmp = []
            for i in range(len(jds)):
                if i == 0:
                    # Everything left of the first intersection is outside.
                    mask_tmp.append(((mgrid_convert[0] <= jds[i][0]) & (mgrid_convert[1] == jds[i][1])).T[col])
                elif i == len(jds) - 1:
                    # Everything right of the last intersection is outside.
                    mask_tmp.append(((mgrid_convert[0] >= jds[i][0]) & (mgrid_convert[1] == jds[i][1])).T[col])
                elif np.mod(i, 2) == 0:
                    # Span between an odd and the next even intersection is
                    # outside the polygon: mask it.
                    mask_tmp.append(((mgrid_convert[0] >= jds[i - 1][0]) & (mgrid_convert[0] <= jds[i][0]) & (mgrid_convert[1] == jds[i][1])).T[col])
                else:
                    # Odd interior index: the preceding span is inside the
                    # polygon, so append an all-False placeholder mask.
                    mask_tmp.append((mgrid_convert[0] <= -100000000).T[0])
            # OR all partial masks of this scan line together.
            mask_ = (mgrid_convert[0] <= -100000000).T[0]
            for _mask in mask_tmp:
                mask_ = mask_ | _mask
            mask_shape.append(mask_)
    # Apply the combined outside-of-polygon mask.
    grid_data_convert[np.array(mask_shape).T] = np.nan
    return [mgrid_convert, grid_data_convert]
def is_in_area(x, y, ax, ay):
    """
    Test whether a point lies inside a closed polygon (ray casting).

    :param x: float -> point x coordinate
    :param y: float -> point y coordinate
    :param ax: list, tuple or array etc. -> polygon x coordinates
        (closed: last vertex repeats the first)
    :param ay: list, tuple or array etc. -> polygon y coordinates
    :return: bool -> True if inside; False if outside; None on the boundary.
    """
    # Pick a random ray from (x, y) whose supporting line misses every
    # vertex, so no crossing is ever counted twice at a corner.
    while True:
        xp = x + random.uniform(1, 100)
        yp = random.uniform(1, 100) * max(ay)
        # Line through (x, y) and (xp, yp): a*X + b*Y + c = 0.
        a = yp - y
        b = x - xp
        c = xp * y - x * yp
        if all(abs(a * vx + b * vy + c) >= 1e-30 for vx, vy in zip(ax, ay)):
            break
    # Count crossings of the forward ray with the polygon edges.
    crossings = 0
    for i in range(len(ax) - 1):
        side_i = a * ax[i] + b * ay[i] + c
        side_j = a * ax[i + 1] + b * ay[i + 1] + c
        # Angles between the ray direction and the two edge endpoints as
        # seen from (x, y); their sum is below pi exactly when the edge
        # crosses the forward half of the line (the actual ray).
        ang_i = np.arccos(
            ((ax[i] - x) * (xp - x) + (ay[i] - y) * (yp - y)) / np.sqrt((ax[i] - x) ** 2 + (ay[i] - y) ** 2) / np.sqrt(
                (xp - x) ** 2 + (yp - y) ** 2))
        ang_j = np.arccos(((ax[i + 1] - x) * (xp - x) + (ay[i + 1] - y) * (yp - y)) / np.sqrt(
            (ax[i + 1] - x) ** 2 + (ay[i + 1] - y) ** 2) / np.sqrt((xp - x) ** 2 + (yp - y) ** 2))
        if (side_i * side_j < 0) and (ang_i + ang_j - np.pi < 0):
            crossings += 1
        if side_i * side_j == 0:
            return None
    # Odd number of crossings -> inside; even -> outside.
    if crossings % 2 == 0:
        return False
    else:
        return True
if __name__ == '__main__':
    # Smoke test: build a grid for Henan province and time 10 point-in-area
    # queries against its boundary.
    province_grid = grid.Grid(
        shp_file_path=os.path.join('shp', '省界_region.shp'),
        region='河南省',
        density=100,
    )
    start = time.time()
    for _ in range(10):
        inside = is_in_area(114.01, 33.32, province_grid.lons, province_grid.lats)
        print(inside)
    print(time.time() - start)
| 31.718519
| 134
| 0.564923
|
4f01e397373b78a3d62757122034ed4859160b75
| 14,540
|
py
|
Python
|
tests/test_client.py
|
northtree/google-maps-services-python
|
891321c3209e13130b760661d23b2e5cd741e39f
|
[
"Apache-2.0"
] | 3,797
|
2015-01-08T05:42:35.000Z
|
2022-03-30T11:45:13.000Z
|
tests/test_client.py
|
northtree/google-maps-services-python
|
891321c3209e13130b760661d23b2e5cd741e39f
|
[
"Apache-2.0"
] | 344
|
2015-01-09T05:39:11.000Z
|
2022-03-22T07:08:56.000Z
|
tests/test_client.py
|
northtree/google-maps-services-python
|
891321c3209e13130b760661d23b2e5cd741e39f
|
[
"Apache-2.0"
] | 1,368
|
2015-01-08T13:04:52.000Z
|
2022-03-29T13:13:31.000Z
|
#
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Tests for client module."""
import time
import responses
import requests
import uuid
import googlemaps
import googlemaps.client as _client
from . import TestCase
from googlemaps.client import _X_GOOG_MAPS_EXPERIENCE_ID
class ClientTest(TestCase):
    def test_no_api_key(self):
        """Using a client constructed without credentials must raise."""
        with self.assertRaises(Exception):
            client = googlemaps.Client()
            client.directions("Sydney", "Melbourne")
    def test_invalid_api_key(self):
        """A malformed API key must be rejected."""
        with self.assertRaises(Exception):
            client = googlemaps.Client(key="Invalid key.")
            client.directions("Sydney", "Melbourne")
    def test_urlencode(self):
        """URL encoding must escape '=' and spaces but leave '~' intact."""
        # See GH #72.
        encoded_params = _client.urlencode_params([("address", "=Sydney ~")])
        self.assertEqual("address=%3DSydney+~", encoded_params)
    @responses.activate
    def test_queries_per_second(self):
        """Client-side rate limiting must throttle a burst of requests."""
        # This test assumes that the time to run a mocked query is
        # relatively small, eg a few milliseconds. We define a rate of
        # 3 queries per second, and run double that, which should take at
        # least 1 second but no more than 2.
        queries_per_second = 3
        query_range = range(queries_per_second * 2)
        for _ in query_range:
            responses.add(
                responses.GET,
                "https://maps.googleapis.com/maps/api/geocode/json",
                body='{"status":"OK","results":[]}',
                status=200,
                content_type="application/json",
            )
        client = googlemaps.Client(
            key="AIzaasdf", queries_per_second=queries_per_second
        )
        start = time.time()
        for _ in query_range:
            client.geocode("Sesame St.")
        end = time.time()
        # NOTE(review): wall-clock bound; may be flaky on a loaded CI host.
        self.assertTrue(start + 1 < end < start + 2)
    @responses.activate
    def test_key_sent(self):
        """The API key must be sent as a query-string parameter."""
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            body='{"status":"OK","results":[]}',
            status=200,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.")
        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "https://maps.googleapis.com/maps/api/geocode/json?"
            "key=AIzaasdf&address=Sesame+St.",
            responses.calls[0].request.url,
        )
    @responses.activate
    def test_extra_params(self):
        """extra_params must be appended to the request query string."""
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            body='{"status":"OK","results":[]}',
            status=200,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.", extra_params={"foo": "bar"})
        self.assertEqual(1, len(responses.calls))
        self.assertURLEqual(
            "https://maps.googleapis.com/maps/api/geocode/json?"
            "key=AIzaasdf&address=Sesame+St.&foo=bar",
            responses.calls[0].request.url,
        )
    def test_hmac(self):
        """
        From http://en.wikipedia.org/wiki/Hash-based_message_authentication_code

        HMAC_SHA1("key", "The quick brown fox jumps over the lazy dog")
           = 0xde7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9

        sign_hmac takes a base64 key and returns a base64 signature.
        """
        message = "The quick brown fox jumps over the lazy dog"
        key = "a2V5"  # "key" -> base64
        signature = "3nybhbi3iqa8ino29wqQcBydtNk="
        self.assertEqual(signature, _client.sign_hmac(key, message))
    @responses.activate
    def test_url_signed(self):
        """Client-ID/secret auth must emit sorted params plus an HMAC
        signature instead of an API key."""
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            body='{"status":"OK","results":[]}',
            status=200,
            content_type="application/json",
        )
        client = googlemaps.Client(client_id="foo", client_secret="a2V5")
        client.geocode("Sesame St.")
        self.assertEqual(1, len(responses.calls))
        # Check ordering of parameters.
        self.assertIn(
            "address=Sesame+St.&client=foo&signature", responses.calls[0].request.url
        )
        self.assertURLEqual(
            "https://maps.googleapis.com/maps/api/geocode/json?"
            "address=Sesame+St.&client=foo&"
            "signature=fxbWUIcNPZSekVOhp2ul9LW5TpY=",
            responses.calls[0].request.url,
        )
    @responses.activate
    def test_ua_sent(self):
        """Requests must carry the library's User-Agent header."""
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            body='{"status":"OK","results":[]}',
            status=200,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.")
        self.assertEqual(1, len(responses.calls))
        user_agent = responses.calls[0].request.headers["User-Agent"]
        self.assertTrue(user_agent.startswith("GoogleGeoApiClientPython"))
    @responses.activate
    def test_retry(self):
        """An OVER_QUERY_LIMIT response must be retried with an identical
        request."""
        class request_callback:
            def __init__(self):
                self.first_req = True

            def __call__(self, req):
                # Fail the first request, succeed on the retry.
                if self.first_req:
                    self.first_req = False
                    return (200, {}, '{"status":"OVER_QUERY_LIMIT"}')
                return (200, {}, '{"status":"OK","results":[]}')

        responses.add_callback(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            content_type="application/json",
            callback=request_callback(),
        )
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.")
        self.assertEqual(2, len(responses.calls))
        self.assertEqual(responses.calls[0].request.url, responses.calls[1].request.url)
    @responses.activate
    def test_transport_error(self):
        """A non-2xx HTTP status surfaces as HTTPError with the status code."""
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            status=404,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf")
        with self.assertRaises(googlemaps.exceptions.HTTPError) as e:
            client.geocode("Foo")
        self.assertEqual(e.exception.status_code, 404)
    @responses.activate
    def test_host_override_on_init(self):
        """A base_url given at construction is used for all requests."""
        responses.add(
            responses.GET,
            "https://foo.com/bar",
            body='{"status":"OK","results":[]}',
            status=200,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf", base_url="https://foo.com")
        client._get("/bar", {})
        self.assertEqual(1, len(responses.calls))
    @responses.activate
    def test_host_override_per_request(self):
        """A base_url passed to _get overrides the default host per call."""
        responses.add(
            responses.GET,
            "https://foo.com/bar",
            body='{"status":"OK","results":[]}',
            status=200,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf")
        client._get("/bar", {}, base_url="https://foo.com")
        self.assertEqual(1, len(responses.calls))
    @responses.activate
    def test_custom_extract(self):
        """A custom extract_body callback replaces default body handling.

        The custom extractor returns the parsed JSON even for a 403 response,
        so no exception is raised and the error payload is returned.
        """
        def custom_extract(resp):
            return resp.json()
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/bar",
            body='{"error":"errormessage"}',
            status=403,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf")
        b = client._get("/bar", {}, extract_body=custom_extract)
        self.assertEqual(1, len(responses.calls))
        self.assertEqual("errormessage", b["error"])
    @responses.activate
    def test_retry_intermittent(self):
        """A transient 500 response is retried; the second attempt succeeds."""
        class request_callback:
            # Stateful callable: 500 on the first request, 200/OK afterwards.
            def __init__(self):
                self.first_req = True
            def __call__(self, req):
                if self.first_req:
                    self.first_req = False
                    return (500, {}, "Internal Server Error.")
                return (200, {}, '{"status":"OK","results":[]}')
        responses.add_callback(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            content_type="application/json",
            callback=request_callback(),
        )
        client = googlemaps.Client(key="AIzaasdf")
        client.geocode("Sesame St.")
        self.assertEqual(2, len(responses.calls))
def test_invalid_channel(self):
# Cf. limitations here:
# https://developers.google.com/maps/premium/reports
# /usage-reports#channels
with self.assertRaises(ValueError):
client = googlemaps.Client(
client_id="foo", client_secret="a2V5", channel="auieauie$? "
)
    def test_auth_url_with_channel(self):
        """Channel is appended for client-id auth and omitted for key auth.

        With accepts_clientid=True the URL must carry param, channel, client,
        and a signature (in that order); with accepts_clientid=False the API
        key is used and the channel is dropped.
        """
        client = googlemaps.Client(
            key="AIzaasdf", client_id="foo", client_secret="a2V5", channel="MyChannel_1"
        )
        # Check ordering of parameters + signature.
        auth_url = client._generate_auth_url(
            "/test", {"param": "param"}, accepts_clientid=True
        )
        self.assertEqual(
            auth_url,
            "/test?param=param"
            "&channel=MyChannel_1"
            "&client=foo"
            "&signature=OH18GuQto_mEpxj99UimKskvo4k=",
        )
        # Check if added to requests to API with accepts_clientid=False
        auth_url = client._generate_auth_url(
            "/test", {"param": "param"}, accepts_clientid=False
        )
        self.assertEqual(auth_url, "/test?param=param&key=AIzaasdf")
def test_requests_version(self):
client_args_timeout = {
"key": "AIzaasdf",
"client_id": "foo",
"client_secret": "a2V5",
"channel": "MyChannel_1",
"connect_timeout": 5,
"read_timeout": 5,
}
client_args = client_args_timeout.copy()
del client_args["connect_timeout"]
del client_args["read_timeout"]
requests.__version__ = "2.3.0"
with self.assertRaises(NotImplementedError):
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
requests.__version__ = "2.4.0"
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
def test_single_experience_id(self):
experience_id1 = "Exp1"
client = googlemaps.Client(key="AIzaasdf", experience_id=experience_id1)
self.assertEqual(experience_id1, client.get_experience_id())
experience_id2 = "Exp2"
client.set_experience_id(experience_id2)
self.assertEqual(experience_id2, client.get_experience_id())
def test_multiple_experience_id(self):
client = googlemaps.Client(key="AIzaasdf")
experience_id1 = "Exp1"
experience_id2 = "Exp2"
client.set_experience_id(experience_id1, experience_id2)
result = "%s,%s" % (experience_id1, experience_id2)
self.assertEqual(result, client.get_experience_id())
def test_no_experience_id(self):
client = googlemaps.Client(key="AIzaasdf")
self.assertIsNone(client.get_experience_id())
def test_clearing_experience_id(self):
client = googlemaps.Client(key="AIzaasdf")
client.set_experience_id("ExpId")
client.clear_experience_id()
self.assertIsNone(client.get_experience_id())
    def test_experience_id_sample(self):
        """Documentation sample for the experience-id API.

        The [START]/[END] region markers delimit code published as a sample;
        keep the snippet body verbatim.
        """
        # [START maps_experience_id]
        experience_id = str(uuid.uuid4())
        # instantiate client with experience id
        client = googlemaps.Client(key="AIza-Maps-API-Key", experience_id=experience_id)
        # clear the current experience id
        client.clear_experience_id()
        # set a new experience id
        other_experience_id = str(uuid.uuid4())
        client.set_experience_id(experience_id, other_experience_id)
        # make API request, the client will set the header
        # X-GOOG-MAPS-EXPERIENCE-ID: experience_id,other_experience_id
        # get current experience id
        ids = client.get_experience_id()
        # [END maps_experience_id]
        result = "%s,%s" % (experience_id, other_experience_id)
        self.assertEqual(result, ids)
    @responses.activate
    def _perform_mock_request(self, experience_id=None):
        """Issue one mocked geocode request and return the captured request.

        Helper for the experience-id header tests: builds a client, applies
        the given experience_id (possibly None), performs a geocode call
        against a mocked endpoint, and returns the recorded HTTP request so
        callers can inspect its headers.
        """
        # Mock response
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/maps/api/geocode/json",
            body='{"status":"OK","results":[]}',
            status=200,
            content_type="application/json",
        )
        # Perform network call
        client = googlemaps.Client(key="AIzaasdf")
        client.set_experience_id(experience_id)
        client.geocode("Sesame St.")
        return responses.calls[0].request
def test_experience_id_in_header(self):
experience_id = "Exp1"
request = self._perform_mock_request(experience_id)
header_value = request.headers[_X_GOOG_MAPS_EXPERIENCE_ID]
self.assertEqual(experience_id, header_value)
def test_experience_id_no_in_header(self):
request = self._perform_mock_request()
self.assertIsNone(request.headers.get(_X_GOOG_MAPS_EXPERIENCE_ID))
    @responses.activate
    def test_no_retry_over_query_limit(self):
        """retry_over_query_limit=False turns OVER_QUERY_LIMIT into ApiError.

        Exactly one request must be made — no retry.
        """
        responses.add(
            responses.GET,
            "https://maps.googleapis.com/foo",
            body='{"status":"OVER_QUERY_LIMIT"}',
            status=200,
            content_type="application/json",
        )
        client = googlemaps.Client(key="AIzaasdf", retry_over_query_limit=False)
        with self.assertRaises(googlemaps.exceptions.ApiError):
            client._request("/foo", {})
        self.assertEqual(1, len(responses.calls))
| 33.579677
| 88
| 0.608735
|
6f5150ac6e8470c8db16274c396a857a7eceaa72
| 605
|
py
|
Python
|
album/migrations/0001_initial.py
|
Chebichii-Lab/Personal-Gallery
|
efdcf232513d41b2e3d1ba283cde0e88170968cd
|
[
"MIT"
] | null | null | null |
album/migrations/0001_initial.py
|
Chebichii-Lab/Personal-Gallery
|
efdcf232513d41b2e3d1ba283cde0e88170968cd
|
[
"MIT"
] | null | null | null |
album/migrations/0001_initial.py
|
Chebichii-Lab/Personal-Gallery
|
efdcf232513d41b2e3d1ba283cde0e88170968cd
|
[
"MIT"
] | 1
|
2021-09-07T10:41:37.000Z
|
2021-09-07T10:41:37.000Z
|
# Generated by Django 3.2.5 on 2021-07-02 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the album app: creates the Image table.

    Auto-generated by Django; do not hand-edit field definitions after this
    migration has been applied anywhere.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Uploaded files are stored under MEDIA_ROOT/images/.
                ('image', models.ImageField(upload_to='images/')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField()),
            ],
        ),
    ]
| 25.208333
| 114
| 0.561983
|
d9b83347332aa139c46972a57ab780ba6d27b854
| 8,853
|
py
|
Python
|
fairseq/tasks/multilingual_denoising.py
|
Csinclair0/fairseq
|
6d9cf6a850c31d12a3ac63e89b005756b09cebeb
|
[
"MIT"
] | null | null | null |
fairseq/tasks/multilingual_denoising.py
|
Csinclair0/fairseq
|
6d9cf6a850c31d12a3ac63e89b005756b09cebeb
|
[
"MIT"
] | null | null | null |
fairseq/tasks/multilingual_denoising.py
|
Csinclair0/fairseq
|
6d9cf6a850c31d12a3ac63e89b005756b09cebeb
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
DenoisingDataset,
Dictionary,
PrependTokenDataset,
ResamplingDataset,
SortDataset,
TokenBlockDataset,
data_utils,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.tasks import register_task
from .denoising import DenoisingTask
logger = logging.getLogger(__name__)
@register_task("multilingual_denoising")
class MultilingualDenoisingTask(DenoisingTask):
    """Multilingual BART-style denoising pretraining task.

    Extends :class:`DenoisingTask` to train over several per-language
    sub-directories of the data path, optionally appending a ``[lang]``
    token per language and temperature-resampling languages on the train
    split to upsample low-resource languages.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific CLI arguments on top of DenoisingTask's."""
        DenoisingTask.add_args(parser)
        parser.add_argument(
            "--multilang-sampling-alpha",
            type=float,
            default=1.0,
            help="smoothing alpha for sample ratios across multiple datasets",
        )
        parser.add_argument("--add-lang-token", default=False, action="store_true")
        parser.add_argument(
            "--langs", type=str, help="language ids we are considering", default=None
        )
        parser.add_argument(
            "--no-whole-word-mask-langs",
            type=str,
            default="",
            metavar="N",
            help="languages without spacing between words dont support whole word masking",
        )

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task."""
        paths = args.data.split(":")
        assert len(paths) > 0
        dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
        data_path = paths[0]
        if args.langs is None:
            # No explicit list: every sub-directory of the data path is a language.
            languages = sorted(
                [
                    name
                    for name in os.listdir(data_path)
                    if os.path.isdir(os.path.join(data_path, name))
                ]
            )
        else:
            languages = args.langs.split(",")
        if args.add_lang_token:
            for lang in languages:
                dictionary.add_symbol("[{}]".format(lang))
        logger.info("dictionary: {} types".format(len(dictionary)))
        if not hasattr(args, "shuffle_instance"):
            args.shuffle_instance = False
        return cls(args, dictionary)

    def __init__(self, args, dictionary):
        super().__init__(args, dictionary)
        self.dictionary = dictionary
        self.seed = args.seed
        # add mask token
        self.mask_idx = self.dictionary.add_symbol("<mask>")
        self.langs = args.langs
        self.args = args

    def _get_sample_prob(self, dataset_lens):
        """
        Get smoothed sampling probability by languages. This helps low resource
        languages by upsampling them.
        """
        prob = dataset_lens / dataset_lens.sum()
        smoothed_prob = prob ** self.args.multilang_sampling_alpha
        smoothed_prob = smoothed_prob / smoothed_prob.sum()
        return smoothed_prob

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        paths = self.args.data.split(":")
        assert len(paths) > 0
        data_path = paths[(epoch - 1) % len(paths)]
        split_path = os.path.join(data_path, split)
        if self.langs is None:
            languages = sorted(
                [
                    name
                    for name in os.listdir(data_path)
                    if os.path.isdir(os.path.join(data_path, name))
                ]
            )
        else:
            languages = self.langs.split(",")
        # Fork behavior: languages whose directory is missing are silently
        # skipped (upstream fairseq asserts here instead).
        dsets = []
        for name in languages:
            p = os.path.join(data_path, name)
            if os.path.exists(p): dsets += [name]
            #assert os.path.exists(p), "data not found: {}".format(p)
        logger.info("Training on {0} languages: {1}".format(len(languages), languages))
        # BUG FIX: logging uses lazy %-formatting; the original call passed the
        # mapping as a format argument with no placeholder, so it was never
        # rendered in the log output.
        logger.info(
            "Language to id mapping: %s", {lang: id for id, lang in enumerate(languages)}
        )
        mask_whole_words = get_whole_word_mask(self.args, self.dictionary)
        language_without_segmentations = self.args.no_whole_word_mask_langs.split(",")
        lang_datasets = []
        for language in dsets:
            split_path = os.path.join(data_path, language, split)
            dataset = data_utils.load_indexed_dataset(
                split_path,
                self.source_dictionary,
                self.args.dataset_impl,
                combine=combine,
            )
            # NOTE(review): upstream raises FileNotFoundError when the dataset
            # is None; this fork relies on the existence check above instead.
            #if dataset is None:
            #raise FileNotFoundError(
            #    "Dataset not found: {} ({})".format(split, split_path)
            #)
            end_token = (
                self.source_dictionary.index("[{}]".format(language))
                if self.args.add_lang_token
                else self.source_dictionary.eos()
            )
            # create continuous blocks of tokens
            dataset = TokenBlockDataset(
                dataset,
                dataset.sizes,
                self.args.tokens_per_sample - 2,  # one less for <s>
                pad=self.source_dictionary.pad(),
                eos=end_token,
                break_mode=self.args.sample_break_mode,
            )
            logger.info("loaded {} blocks from: {}".format(len(dataset), split_path))
            # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
            dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
            dataset = AppendTokenDataset(dataset, end_token)
            # Languages without word segmentation cannot use whole-word masking.
            lang_mask_whole_words = (
                mask_whole_words
                if language not in language_without_segmentations
                else None
            )
            lang_dataset = DenoisingDataset(
                dataset,
                dataset.sizes,
                self.dictionary,
                self.mask_idx,
                lang_mask_whole_words,
                shuffle=self.args.shuffle_instance,
                seed=self.seed,
                args=self.args,
                eos=None
                if not self.args.add_lang_token
                else self.source_dictionary.index("[{}]".format(language)),
            )
            lang_datasets.append(lang_dataset)
        dataset_lengths = np.array(
            [len(d) for d in lang_datasets],
            dtype=float,
        )
        logger.info(
            "loaded total {} blocks for all languages".format(
                int(dataset_lengths.sum()),
            )
        )
        if split == self.args.train_subset:
            # For train subset, additionally up or down sample languages.
            sample_probs = self._get_sample_prob(dataset_lengths)
            logger.info(
                "Sample probability by language: {}".format(
                    {
                        lang: "{0:.4f}".format(sample_probs[id])
                        for id, lang in enumerate(dsets)
                    }
                )
            )
            size_ratio = (sample_probs * dataset_lengths.sum()) / dataset_lengths
            logger.info(
                "Up/Down Sampling ratio by language: {}".format(
                    {
                        lang: "{0:.2f}".format(size_ratio[id])
                        for id, lang in enumerate(dsets)
                    }
                )
            )
            resampled_lang_datasets = [
                ResamplingDataset(
                    lang_datasets[i],
                    size_ratio=size_ratio[i],
                    seed=self.args.seed,
                    epoch=epoch,
                    replace=size_ratio[i] >= 1.0,
                )
                for i, d in enumerate(lang_datasets)
            ]
            dataset = ConcatDataset(
                resampled_lang_datasets,
            )
        else:
            # Validation/test: concatenate as-is and also register one
            # per-language split so each language can be evaluated separately.
            dataset = ConcatDataset(lang_datasets)
            lang_splits = [split]
            for lang_id, lang_dataset in enumerate(lang_datasets):
                split_name = split + "_" + languages[lang_id]
                lang_splits.append(split_name)
                self.datasets[split_name] = lang_dataset
            if split in self.args.valid_subset:
                self.args.valid_subset = self.args.valid_subset.replace(
                    split, ",".join(lang_splits)
                )
        with data_utils.numpy_seed(self.args.seed + epoch):
            shuffle = np.random.permutation(len(dataset))
        self.datasets[split] = SortDataset(
            dataset,
            sort_order=[
                shuffle,
                dataset.sizes,
            ],
        )
| 34.313953
| 91
| 0.537897
|
6085315557723f734d92a5aa2490c8f5c9eed1c3
| 9,840
|
py
|
Python
|
predict_shap_local.py
|
showkeyjar/beauty
|
7c944cf896c899d9e23b2e50e293103bb03fe6cd
|
[
"MulanPSL-1.0"
] | 1
|
2022-01-29T12:32:38.000Z
|
2022-01-29T12:32:38.000Z
|
predict_shap_local.py
|
showkeyjar/beauty
|
7c944cf896c899d9e23b2e50e293103bb03fe6cd
|
[
"MulanPSL-1.0"
] | null | null | null |
predict_shap_local.py
|
showkeyjar/beauty
|
7c944cf896c899d9e23b2e50e293103bb03fe6cd
|
[
"MulanPSL-1.0"
] | null | null | null |
# %% coding=utf-8
import sys
import shap
import dlib
import dill
import warnings
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from gen_report import gen_report
from shap.common import convert_to_link, Instance, Model, Data, DenseData, Link
"""
预测解释 shap
"""
#%%
# --- Model / data loading executed at import time -------------------------
# dlib face detector + 68-point landmark predictor.
predictor_path = "model/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
# Trained beauty-score regressor.
model = joblib.load('model/beauty.pkl')
#explainer = joblib.load('model/explainer.pkl')
# SHAP explainer was serialized with dill (joblib load is kept commented out).
with open('model/explainer.pkl', 'rb') as f:
    explainer = dill.load(f)
# Training feature matrix; used here only to recover the feature names.
df_input = pd.read_csv('data/face/df_input.csv', dtype=np.float64)
df_label = df_input['label'].values
df_input = df_input.drop(['Unnamed: 0', 'Image', 'label'], axis=1)
feature_names = df_input.columns
df_input = df_input.values
#print(feature_names)
# Landmark-id -> human-readable description table (keys compared as strings).
df_explain = pd.read_csv('model/explain.csv')
df_explain['key'] = df_explain['key'].astype(str)
#%%
def prepare_input(img_path):
    """Extract pairwise landmark-distance features from a face image.

    Detects the first face in *img_path* with dlib, predicts 68 landmarks,
    and builds normalized |dx|/|dy| distances between every landmark pair
    (columns named "<i>_<j>_x" / "<i>_<j>_y", divided by face width/height).

    Returns a single-row DataFrame, or None if no face was detected.
    NOTE(review): only the first detected face is used (loop breaks).
    """
    img = dlib.load_rgb_image(img_path)
    dets = detector(img, 1)
    df_image = None
    for k, d in enumerate(dets):
        # print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(k, d.left(), d.top(), d.right(), d.bottom()))
        f_width = abs(d.right() - d.left())
        f_height = abs(d.bottom() - d.top())
        # print('width:' + str(f_width) + ', height:' + str(f_height))
        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        # print("Part 0: {}, Part 1: {} ...".format(shape.part(0), shape.part(1)))
        face_shape = {}
        for i in range(0, 67):
            for j in range(i + 1, 68):
                # Normalize by face size so features are scale-invariant.
                face_shape[str(i) + '_' + str(j) + '_x'] = abs(shape.part(i).x - shape.part(j).x) / f_width
                face_shape[str(i) + '_' + str(j) + '_y'] = abs(shape.part(i).y - shape.part(j).y) / f_height
                # print(str(i) + '_' + str(j))
        # shape_size.append(face_shape)
        df_image = pd.DataFrame.from_dict([face_shape])
        break
    return df_image
class Explanation:
    """Marker base class for SHAP explanations (mirrors shap's own API)."""
    def __init__(self):
        pass
class AdditiveExplanation(Explanation):
    """Container for one additive SHAP explanation (local copy of shap's).

    Holds the base value, the model output, per-feature effects, and the
    associated Instance/Link/Model/Data wrappers; the asserts enforce the
    shap wrapper types.
    """
    def __init__(self, base_value, out_value, effects, effects_var, instance, link, model, data):
        # Expected value of the model over the background data.
        self.base_value = base_value
        # base_value + sum(effects) for this sample.
        self.out_value = out_value
        self.effects = effects
        self.effects_var = effects_var
        assert isinstance(instance, Instance)
        self.instance = instance
        assert isinstance(link, Link)
        self.link = link
        assert isinstance(model, Model)
        self.model = model
        assert isinstance(data, Data)
        self.data = data
def ensure_not_numpy(x):
    """Coerce numpy/bytes scalars to plain Python types.

    bytes -> str (decoded), str -> str, numpy scalar -> float; anything
    else is returned unchanged.

    Fix: the original checked ``isinstance(x, np.str)``. ``np.str`` was a
    deprecated alias of the builtin ``str`` and was removed in NumPy 1.24,
    so that line raises AttributeError on modern NumPy; builtin ``str``
    is the correct check.
    """
    if isinstance(x, bytes):
        return x.decode()
    elif isinstance(x, str):
        return str(x)
    elif isinstance(x, np.generic):
        # np.generic covers all numpy scalar types (np.float64, np.int64, ...).
        return float(x.item())
    else:
        return x
def force_df(base_value, shap_values, features=None, feature_names=None, out_names=None, link="identity",
             plot_cmap="RdBu", matplotlib=False, show=True, figsize=(20, 3), ordering_keys=None,
             ordering_keys_time_format=None,
             text_rotation=0):
    """Build a DataFrame of non-zero SHAP effects (adapted from shap.force_plot).

    Instead of rendering a force plot, returns a DataFrame with columns
    'feature', 'effect', 'value' for every feature whose SHAP effect is
    non-zero. Most keyword arguments are inherited from shap.force_plot's
    signature and are unused here.

    NOTE(review): the multi-sample branch (shap_values.shape[0] != 1) builds
    `exps` as a *list*, so the `exps.data...` access at the bottom would raise
    AttributeError. In this file the function is only ever called with a
    single row (shap_values[0, :]) — confirm before reusing with batches.
    """
    # auto unwrap the base_value
    if type(base_value) == np.ndarray and len(base_value) == 1:
        base_value = base_value[0]
    if (type(base_value) == np.ndarray or type(base_value) == list):
        if type(shap_values) != list or len(shap_values) != len(base_value):
            raise Exception("In v0.20 force_plot now requires the base value as the first parameter! " \
                            "Try shap.force_plot(explainer.expected_value, shap_values) or " \
                            "for multi-output models try " \
                            "shap.force_plot(explainer.expected_value[0], shap_values[0]).")
    assert not type(shap_values) == list, "The shap_values arg looks looks multi output, try shap_values[i]."
    link = convert_to_link(link)
    if type(shap_values) != np.ndarray:
        return shap_values
    # convert from a DataFrame or other types
    if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
        if feature_names is None:
            feature_names = list(features.columns)
        features = features.values
    elif str(type(features)) == "<class 'pandas.core.series.Series'>":
        if feature_names is None:
            feature_names = list(features.index)
        features = features.values
    elif isinstance(features, list):
        if feature_names is None:
            feature_names = features
        features = None
    elif features is not None and len(features.shape) == 1 and feature_names is None:
        feature_names = features
        features = None
    if len(shap_values.shape) == 1:
        # Promote a 1-D effect vector to a single-row 2-D array.
        shap_values = np.reshape(shap_values, (1, len(shap_values)))
    if out_names is None:
        out_names = ["output value"]
    elif type(out_names) == str:
        out_names = [out_names]
    if shap_values.shape[0] == 1:
        # Single-sample path: build one AdditiveExplanation.
        if feature_names is None:
            feature_names = [shap.labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
        if features is None:
            features = ["" for _ in range(len(feature_names))]
        if type(features) == np.ndarray:
            features = features.flatten()
        # check that the shape of the shap_values and features match
        if len(features) != shap_values.shape[1]:
            msg = "Length of features is not equal to the length of shap_values!"
            if len(features) == shap_values.shape[1] - 1:
                msg += " You might be using an old format shap_values array with the base value " \
                       "as the last column. In this case just pass the array without the last column."
            raise Exception(msg)
        instance = Instance(np.zeros((1, len(feature_names))), features)
        exps = AdditiveExplanation(
            base_value,
            np.sum(shap_values[0, :]) + base_value,
            shap_values[0, :],
            None,
            instance,
            link,
            Model(None, out_names),
            DenseData(np.zeros((1, len(feature_names))), list(feature_names))
        )
    else:
        # Multi-sample path (kept from shap.force_plot; see NOTE above).
        if matplotlib:
            raise Exception("matplotlib = True is not yet supported for force plots with multiple samples!")
        if shap_values.shape[0] > 3000:
            warnings.warn("shap.force_plot is slow for many thousands of rows, try subsampling your data.")
        exps = []
        for i in range(shap_values.shape[0]):
            if feature_names is None:
                feature_names = [shap.labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]
            if features is None:
                display_features = ["" for i in range(len(feature_names))]
            else:
                display_features = features[i, :]
            instance = Instance(np.ones((1, len(feature_names))), display_features)
            e = AdditiveExplanation(
                base_value,
                np.sum(shap_values[i, :]) + base_value,
                shap_values[i, :],
                None,
                instance,
                link,
                Model(None, out_names),
                DenseData(np.ones((1, len(feature_names))), list(feature_names))
            )
            exps.append(e)
    # Tabulate per-feature effects and drop zero-effect rows.
    result_df = pd.DataFrame({'feature': exps.data.group_names, 'effect': ensure_not_numpy(exps.effects), 'value': exps.instance.group_display_values})
    result_df = result_df[result_df['effect'] != 0].reset_index()
    return result_df
def get_explain(x):
    """Translate a feature name like "12_34_x" into a readable description.

    Each numeric landmark id is looked up in the global ``df_explain`` table;
    a trailing 'x'/'y' maps to the Chinese for width/height. Unknown ids
    contribute an empty string.

    Fix: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    real programming errors) is narrowed to the lookup failures that can
    actually occur here.
    """
    global df_explain
    points = x.split('_')
    exp = ''
    for p in points:
        if p != 'x' and p != 'y':
            try:
                exp += df_explain[df_explain['key'] == p]['explain'].values[0]
            except (KeyError, IndexError):
                # Unknown landmark id: no description available; keep going.
                exp += ''
            exp += '_'
        if p == 'x':
            exp += '宽'
        elif p == 'y':
            exp += '高'
    return exp
def gen_report(im_path):
    """Score a face image and emit a beauty report with SHAP explanations.

    Predicts the beauty score for *im_path*, explains it with SHAP, splits
    the features into positive ("优势") and negative ("欠缺") contributors,
    and hands the collected strings to the report writer.

    Fixes vs. original:
    - This def shadows the imported ``gen_report.gen_report``, so the final
      call recursed into itself with the wrong arity (TypeError). The report
      writer is re-imported under an alias.
    - ``df[0:10, 'explain']`` is invalid DataFrame indexing (raises); select
      the column first, then slice.
    - The "bad" filter used ``< 0.01`` which also matched small *positive*
      effects; negative SHAP effects are the detracting ones, so ``< -0.01``.
    """
    # Re-import under an alias so the call below reaches the real report
    # writer instead of recursing into this (shadowing) function.
    from gen_report import gen_report as write_report
    X_test = prepare_input(im_path)
    Y_test = model.predict(X_test)
    params = []
    print('beauty score:' + str(Y_test))
    params.append(Y_test)
    shap_values = explainer.shap_values(X_test)
    print('gen explain')
    result = force_df(explainer.expected_value, shap_values[0, :], X_test)
    result['explain'] = result['feature'].apply(get_explain)
    try:
        good_effect = result[result['effect'] > 0.01].sort_values('effect', ascending=False).reset_index()
    except Exception:
        good_effect = None
    try:
        bad_effect = result[result['effect'] < -0.01].sort_values('effect').reset_index()
    except Exception:
        bad_effect = None
    if good_effect is not None:
        # Column first, then row slice — df[0:10, 'explain'] is a TypeError.
        good_str = str(good_effect['explain'][0:10].values)
        params.append(good_str)
        print('您的优势部位:' + good_str)
    if bad_effect is not None:
        bad_str = str(bad_effect['explain'][0:10].values)
        params.append(bad_str)
        print('您的欠缺部位:' + bad_str)
    write_report('t1', params)
if __name__ == "__main__":
    # CLI: predict_shap_local.py [image_path] [mode]; defaults below.
    try:
        test = sys.argv[1]
        mode = sys.argv[2]
    except IndexError:
        test = "img/t1.jpg"
        mode = 'shap'
    # result = model.predict(df_input)
    X_test = prepare_input(test)
    y_test = model.predict(X_test)
    print('beauty score:' + str(y_test))
    shap_values = explainer.shap_values(X_test)
    print('gen explain')
    result = force_df(explainer.expected_value, shap_values[0, :], X_test)
    result['explain'] = result['feature'].apply(get_explain)
    good_effect = result[result['effect'] > 0.01].sort_values('effect', ascending=False).reset_index()
    # Negative SHAP effects are the detracting features; the original
    # `< 0.01` also swept up small positive effects.
    bad_effect = result[result['effect'] < -0.01].sort_values('effect').reset_index()
    # Column first, then row slice — df[0:10, 'explain'] raises TypeError.
    print('您的优势部位:' + str(good_effect['explain'][0:10].values))
    print('您的欠缺部位:' + str(bad_effect['explain'][0:10].values))
| 39.047619
| 151
| 0.608943
|
2c194f65bbeac943ad17c27fe7145a2dd8b1ac0a
| 4,798
|
py
|
Python
|
jax/experimental/jax2tf/tests/jax2tf_test.py
|
JonyEpsilon/jax
|
7f9835aa4426690277230dacc5389f7ed42c290e
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-07-10T03:59:22.000Z
|
2021-07-10T03:59:22.000Z
|
jax/experimental/jax2tf/tests/jax2tf_test.py
|
JonyEpsilon/jax
|
7f9835aa4426690277230dacc5389f7ed42c290e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/experimental/jax2tf/tests/jax2tf_test.py
|
JonyEpsilon/jax
|
7f9835aa4426690277230dacc5389f7ed42c290e
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for JAX2TF converted.
Specific JAX primitive conversion tests are in primitives_test."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import numpy as jnp
from jax import test_util as jtu
from jax.config import config
from jax.experimental import jax2tf
from jax.experimental.jax2tf.tests import tf_test_util
import numpy as np
import tensorflow as tf # type: ignore[import]
config.parse_flags_with_absl()
class Jax2TfTest(tf_test_util.JaxToTfTestCase):
  """End-to-end tests for jax2tf.convert (TF interop, errors, 64-bit, ids)."""

  def test_basics(self):
    # Converted function must return a tf.Tensor and match JAX numerically.
    f_jax = lambda x: jnp.sin(jnp.cos(x))
    _, res_tf = self.ConvertAndCompare(f_jax, 0.7)
    self.assertIsInstance(res_tf, tf.Tensor)

  def test_variable_input(self):
    # tf.Variable inputs are accepted by converted functions.
    f_jax = lambda x: jnp.sin(jnp.cos(x))
    f_tf = jax2tf.convert(f_jax)
    v = tf.Variable(0.7)
    self.assertIsInstance(f_tf(v), tf.Tensor)
    self.assertAllClose(f_jax(0.7), f_tf(v))

  def test_jit(self):
    f_jax = jax.jit(lambda x: jnp.sin(jnp.cos(x)))
    self.ConvertAndCompare(f_jax, 0.7)

  def test_nested_jit(self):
    f_jax = jax.jit(lambda x: jnp.sin(jax.jit(jnp.cos)(x)))
    f_tf = jax2tf.convert(f_jax)
    np.testing.assert_allclose(f_jax(0.7), f_tf(0.7))

  def test_converts_jax_arrays(self):
    # JAX arrays (including ShardedDeviceArray) are valid tf.function inputs.
    f_tf = tf.function(lambda x: x)
    self.assertEqual(f_tf(jnp.zeros([])).numpy(), 0.)
    self.assertEqual(f_tf(jnp.ones([])).numpy(), 1.)
    f_tf = tf.function(lambda x: x + x)
    self.assertEqual(f_tf(jnp.ones([])).numpy(), 2.)
    # Test with ShardedDeviceArray.
    n = jax.local_device_count()
    mk_sharded = lambda f: jax.pmap(lambda x: x)(f([n]))
    f_tf = tf.function(lambda x: x)
    self.assertAllClose(f_tf(mk_sharded(jnp.zeros)).numpy(),
                        np.zeros([n]))
    self.assertAllClose(f_tf(mk_sharded(jnp.ones)).numpy(),
                        np.ones([n]))

  @parameterized.named_parameters(jtu.cases_from_list(
      dict(testcase_name=f"_dtype={dtype.__name__}_function={with_function}",
           with_function=with_function,
           dtype=dtype)
      for with_function in [False, True]
      for dtype in [np.int64, np.float64]))
  def test_converts_64bit(self, dtype=np.int64, with_function=False):
    """64-bit values (beyond 2**32) survive conversion and TF round-trips."""
    if jtu.device_under_test() == "tpu":
      # TODO(necula): fix and re-enable this test.
      raise self.skipTest("Test is flaky on TPU")
    big_const = np.full((5,), 2 ** 33, dtype=dtype)
    self.ConvertAndCompare(jnp.sin, big_const)
    f_conv = jax2tf.convert(jnp.sin)
    if with_function:
      f_conv = tf.function(f_conv)
    # We check also when we pass tf.Variable or tf.Tensor into the
    # converted function
    self.assertAllClose(jnp.sin(big_const),
                        f_conv(tf.Variable(big_const)))
    self.assertAllClose(jnp.sin(big_const),
                        f_conv(tf.constant(big_const)))

  def test_function(self):
    f_jax = jax.jit(lambda x: jnp.sin(jnp.cos(x)))
    self.ConvertAndCompare(f_jax, 0.7, with_function=True)

  def test_gradients_disabled(self):
    # Gradients through converted functions are not supported (yet).
    f = jax2tf.convert(jnp.tan)
    x = tf.ones([])
    with tf.GradientTape() as tape:
      tape.watch(x)
      y = f(x)
    with self.assertRaisesRegex(ValueError,
                                'jax2tf currently does not support gradients'):
      tape.gradient(y, x)

  def test_convert_argument_non_callable_error(self):
    with self.assertRaisesRegex(TypeError, "Expected a callable value"):
      jax2tf.convert(5.)

  def test_convert_argument_non_tensor_error(self):
    with self.assertRaisesRegex(TypeError,
                                "Argument.*should be NumPy array"):
      jax2tf.convert(lambda x: x)(lambda y: y)

  def test_argument_eager_tensor(self):
    # Eager TF tensors are accepted as inputs to converted functions.
    x = jax2tf.convert(jnp.sin)(1.)
    jax2tf.convert(jnp.cos)(x)  # No error

  def test_checkpoint_wrapper_types(self):
    # tf.Module auto-wraps containers in checkpointable types; jax.tree_*
    # must still treat them as pytrees with the right number of leaves.
    m = tf.Module()
    m.a = [tf.Module(), tf.Module()]
    m.b = (tf.Module(), tf.Module())
    m.c = {'a': tf.Module(), 'b': tf.Module()}
    self.assertNotEqual(type(m.a), list)
    self.assertNotEqual(type(m.b), tuple)
    self.assertNotEqual(type(m.c), dict)
    self.assertLen(jax.tree_leaves(m.a), 2)
    self.assertLen(jax.tree_leaves(m.b), 2)
    self.assertLen(jax.tree_leaves(m.c), 2)
# Standard absltest entry point.
if __name__ == "__main__":
  absltest.main()
| 35.279412
| 79
| 0.679658
|
4b0722dff46deebcffa2d33f128b5e131833bc60
| 32,072
|
py
|
Python
|
chapter2.py
|
Yourself1011/pukage
|
809601c355ac4a544a6007c179c7aa6797ebbbe9
|
[
"MIT"
] | 2
|
2020-08-26T22:17:18.000Z
|
2020-09-07T01:05:02.000Z
|
chapter2.py
|
Yourself1011/pukage
|
809601c355ac4a544a6007c179c7aa6797ebbbe9
|
[
"MIT"
] | 1
|
2020-09-08T19:33:36.000Z
|
2020-09-09T15:40:51.000Z
|
chapter2.py
|
Yourself1011/pukage
|
809601c355ac4a544a6007c179c7aa6797ebbbe9
|
[
"MIT"
] | 1
|
2020-08-04T15:43:35.000Z
|
2020-08-04T15:43:35.000Z
|
"""
Pukage
Choose-your-own-adventure game.
https://github.com/Yourself1011/Pukage/
Copyright 2020 Daniel Zhang, Jeffrey Zang, Li Feng Yin, and all Pukage contributors https://github.com/Yourself1011/Pukage/graphs/contributors/
MIT License
"""
from random import randint
from time import sleep
import scrolltype
from scrolltype import scrolltype as scrollType
import waittype as WaitType
from waittype import waittype
from inventory import inv
import readchar.readchar
import readchar.key
from typing import List, Any
from os import system, name
from threading import Thread
import sys
from main import (
showStats,
commitDie,
gotHurt,
gotTired,
gotHungry,
createMenu,
tempEnd,
options,
clearConsole,
fight,
stats,
slept,
healed,
ate,
)
# NOTE(review): `global` at module level is a no-op; kept as written.
global lastWords
# Last words shown on death — presumably read by commitDie; verify usage.
lastWords = "i haven't died yet so yeah"
def Chapter2intro(endThing):
    """does the intro for chapter 2"""
    clearConsole()
    global goneThroughTrapDoor
    goneThroughTrapDoor = False
    # NOTE(review): sawMan is declared global but never assigned here —
    # presumably set later in the chapter; confirm.
    global sawMan
    waittype("Chapter 2")
    # Branch on how chapter 1 ended.
    if endThing == "following man":
        follow2()
    else:
        leavingWithoutFollowingMan()
def follow2():
    """following the man out of your house"""
    waittype(
        "You stomp out into the street after the man. You see blurry shadows of other buildings and houses. He turns around and sees you following him."
    )
    waittype(
        "He starts to weave around buildings and into shadows, trying to shake you off his tail. You notice that the city is very quiet and clean. No lights or sounds anywhere."
    )
    waittype(
        "He does a few tight turns around some dark alleyways and you lose sight of him. You look around corners and on different streets, but there is no sign of the man."
    )
    waittype(
        "The air is calm and clean. The streets are flat and paved with bricks. There are no lights in windows or people anywhere."
    )
    waittype(
        "Suddenly, you see something moving in the corner of your eye. You turn towards it and see the man enter a building in the distance."
    )
    # Player chooses the next scene.
    options(["Follow the man", "Explore the town"], [follow3, explore])
def follow3():
    """following the man into the building"""
    waittype(
        "You slowly walk towards the building when you see a light turn on inside. You see the man's silhouette appear for a second."
    )
    waittype(
        "You go around the side of the building and reach the door where the man went in. The building is small but has multiple floors. The door is heavy like those you would find in a warehouse or lab."
    )
    waittype(
        "You hear the man mumbling to himself and making noise. You hear a small generator hum in the back. It makes a low rumbling sound and suddenly stops."
    )
    waittype(
        "The man stops and starts to walk toward the door. You realise that he will see you if you do not move."
    )
    # Three-way choice: hide (two spots) or confront.
    options(
        [
            "Hide in the back of the building",
            "Hide in the front where the windows are",
            "Confront the man",
        ],
        [hide3, hide4, confrontTheMan],
    )
def hide3():
    """Hiding in the back of the building when the man comes out to fix the generator.

    Sets the global generatorFixed flag so later scenes know the power is on.
    """
    waittype(
        "You quickly walk over to the back of the building, away from the generator. You hide behind some boxes and wait for the man. "
    )
    # Text fix: original read "The man walks opens the door with a loud creak
    # and the man steps outside." — a garbled sentence shown to the player.
    waittype(
        "The door opens with a loud creak and the man steps outside."
    )
    waittype(
        "He walks over to the generator and dumps some fuel into it. It starts humming again and the lights inside turn back on. "
    )
    waittype("The man walks back into the building.")
    global generatorFixed
    generatorFixed = True
    options(
        ["Try to find a way into the building", "Explore the city"],
        [tryingToGoIn, explore],
    )
def tryingToGoIn():
    """trying to find a way into the building after the man fixes the generator"""
    # Describe every entrance the player could try; the labels below must stay
    # in the same order as the handler list.
    waittype(
        "You look around the building. There is a small trapdoor that is quite high up near the back. You think that it leads to the second floor. The window in the front doesn't look like it can open. The heavy door is unlocked, and there are some boxes in the back too."
    )  # oops long waittype
    # Six entry strategies, mapped positionally to six handlers.
    options(
        [
            "Use the boxes to go into the trapdoor",
            "Climb onto the generator to get into the trapdoor",
            "Try and break the window",
            "Enter through the front door",
            "Explore the city",
            "Search the boxes",
        ],
        [
            getIntoTrapdoorWithBoxes,
            getIntoTrapdoorWithGenerator,
            breakWindow,
            enterThroughDoor,
            explore,
            searchBoxes,
        ],
    )
def getIntoTrapdoorWithBoxes():
    """trying to get into the trapdoor using the boxes"""
    waittype(
        "You move some boxes around and position them like a staircase. You climb up and open the trapdoor."
    )
    waittype(
        "You squeeze in and fall out the other side into a completely dark room. You rummage around for the lights and flick it on."
    )
    # Typo fixes in the narration: "change then" -> "change than",
    # "poster" -> "posters", "scienticic" -> "scientific".
    waittype(
        "The light blinds your eyes, a huge change than the pitch black darkness just seconds before. The whole room is covered with posters and maps, notes and experiments. There is a small bed in the corner and a very messy desk with lots of papers and a laptop."
    )
    waittype(
        "The room feels like a laboratory or an office. You see diagrams of the sun and earth all around. You see models of the solar system, stickers and posters of scientific terms."
    )
    waittype(
        "You sit at the desk and reach for the computer when you hear the man walking up the stairs."
    )
    # Player must react before the man reaches the room.
    options(
        ["Exit through the trapdoor", "Try and find somewhere to hide"],
        [exitThroughTrapdoor, hide5],
    )
def exitThroughTrapdoor():
    """trying to leave through the trapdoor when the man comes in"""
    # 50/50 roll: clean exit, or a noisy fall that costs 5 health.
    number = randint(1, 2)
    if number == 1:
        waittype(
            "You decide to leave through the trapdoor. You lower yourself down onto the boxes, then drop out of sight just as the man comes in."
        )
    else:
        waittype(
            "You quickly open the trapdoor and try to put your feet down onto the boxes. You accidentally kick one of them and fall to the ground in surprise."
        )
        gotHurt(5)
    waittype(
        "You quickly hide behind some of the boxes and lay still, your heart beating heavily. You hear the man walk in, take some things, and then turn off the lights and walk out."
    )
    global goneThroughTrapDoor
    goneThroughTrapDoor = True
    # BUG FIX: there were 5 labels but only 4 handlers — "Go in through the
    # front door" had no handler, shifting every later choice off by one.
    options(
        [
            "Try to get back into the building through the trapdoor",
            "Search the boxes",
            "Go in through the front door",
            "Break the front window",
            "Explore the city",
        ],
        [goBackInThroughTrapdoor, searchBoxes, enterThroughDoor, breakWindow, explore],
    )
def goBackInThroughTrapdoor():
    """going back in through the trapdoor"""
    waittype(
        "You climb back onto the boxes and go into the trapdoor. The lights are off and it is pitch black inside."
    )
    # BUG FIX: a missing comma fused the label list and handler list into
    # `[...][lightsOn2, leaveRoom]`, indexing a list with a list (TypeError).
    options(["Turn on the lights", "Leave"], [lightsOn2, leaveRoom])
def lightsOn2():
    """turning on the lights in the trapdoor room"""
    tempEnd()  # placeholder: scene not written yet
def leaveRoom():
    """leaving the trapdoor room"""
    tempEnd()  # placeholder: scene not written yet
def hide5():
    """finding somewhere to hide when the man comes in"""
    # NOTE(review): the first line says the player hides behind the door but
    # the second says they wait "under the desk" — narration inconsistency.
    waittype(
        "You look around for a hiding place and decide to hide behind the door. The man comes in and takes the laptop. Then, he turns out the lights and leaves."
    )
    waittype(
        "You wait under the desk for a few more minutes while the man goes back downstairs."
    )
    options(
        [
            "Search the room", "Exit through the trapdoor",
            "Exit through the door"
        ],
        [searchRoom, exitThroughTrapdoor2, exitThroughDoor],
    )
def searchRoom():
    """Search the trapdoor room (scene not written yet)."""
    tempEnd()
def exitThroughTrapdoor2():
    """Leave via the trapdoor after hiding (scene not written yet)."""
    tempEnd()
def exitThroughDoor():
    """Leave via the door after hiding (scene not written yet)."""
    tempEnd()
def getIntoTrapdoorWithGenerator():
    """Trying to get into the trapdoor using the gen."""
    waittype(
        "You step onto the generator and try to pull yourself up. The generator is surprisingly sturdy."
    )
    # NOTE(review): "breaks of the wall" reads like a typo for "breaks off
    # the wall" — narration string left unchanged here.
    waittype(
        "You reach for the top of the trapdoor and pull yourself up. Suddenly, the generator breaks of the wall and crashes to the ground."
    )
    waittype(
        "Your fingers slip off of the trapdoor's frame and you fall down with a loud thud."
    )
    waittype(
        'The man rushes out and points a knife at you threateningly. "You!" he says in a deep voice. "What are you doing here?"'
    )
    # The fall costs 5 health before the confrontation choice.
    gotHurt(5)
    options(
        ["""say "I'm just...exploring?" """, "Try to fight the man"],
        [pleaseDontHurtMeImJustExploring, fightMan],
    )
def pleaseDontHurtMeImJustExploring():
    """saying pleaseDontHurtMeImJustExploring to the man"""
    waittype(
        "The man grunts and pulls you off of the ground. Are you part of the Red Horse Gang?"
    )
    waittype(
        '"Uhh...", you mutter.\n"Tell me!", says the man, taking a knife out of his pocket and puts it to your throat. "No!", you scream in horror.'
    )
    waittype(
        'The man lets you go and you fall down to the ground in fright. He pulls you back up and sits down next to you. "Sorry about that. We\'ve always got to make sure, y\'know?"'
    )
    waittype(
        '"Don\'t talk much, do you? Well, I guess I just put a knife to your throat, so. Why don\'t you come in for a bit, have a rest, eh?"'
    )
    # BUG FIX: options() was called without a handler list, so neither label
    # could dispatch; wire them to the two branches defined just below.
    options(["Go inside with the man", 'say, "No thanks."'], [goInWithTheMan, noThanks])
def goInWithTheMan():
    """Accept Daniel's invitation, chat inside the lab, and get his name."""
    waittype(
        "The man opens the door to the building and walks inside. You are in some kind of lab with a huge counter surrounding the whole room. There is a large table with many chairs, cans, plates, and paper on it."
    )
    waittype(
        "The man reaches into a small fridge under the counter and pulls out two drinks. He throws one to you and sits down at the table, brushing some papers onto the ground to make space."
    )
    waittype('"So! Where you from?", the man asks, taking a sip from his can.')
    cityCountry = input("Where are you from? (city,country)")
    # BUG FIX: removed the stray space before the period ("... . What").
    waittype('"I\'m from ' + cityCountry + '. What about you?"')
    waittype(
        '"I\'ve been here my whole life. This is my hometown.", said the man.')
    waittype('"Ok. What is this place, anyway?"')
    waittype(
        '"This is Farside. A town on the west coast of Quebec, near Hudson\'s Bay.", said the man. You open the can and take a sip. The cool, refreshing liquid reminds you how thirsty you are.'
    )
    waittype(
        '"Ahh...", said the man. "I think we got off on the wrong foot. Let\'s start over. My name\'s Daniel. People call me the Dan Man."'
    )
    waittype('"My name is..."')
    uName = input("What is your name?")
    # BUG FIX: quoting was broken ('"My name is"NAME"."'); keep the player's
    # name inside the spoken quotes instead.
    waittype('"My name is ' + uName + '."')
    waittype('"Well, it\'s kinda late now. Wanna stay the night?"')
    options(["Stay the night", 'say, "No thanks."'], [stay2, noThanks2])
def stay2():
    """Sleep over at Daniel's; a low sleep roll adds a nightmare beat."""
    # Sleep quality is a random 10..100; slept() presumably applies its
    # stat effects — TODO confirm against its definition.
    randomInt = randint(10, 100)
    slept(randomInt)
    if randomInt <= 25:
        waittype("\nYou wake up from your nightmare, scared but glad that it is over.")
    waittype("You were so tired last night that you fell asleep right away. You didn't notice anything in the large bedroom that you are in.")
    waittype("You remember Dan from last night. You are glad that he helped you. You look around and see a staircase leading downstairs, a small wardrobe, another door, 2 closed windows with blinds, and a small desk and chair in the corner.")
    options(["Go downstairs", "Open the wardrobe", "Go through the door", "Go over to the desk", "Look for the lights", "Open the windows"],[goDownstairs, searchWardrobe3, goThroughDoor, goToDesk, lights2, openWindows])
def goDownstairs():
    """Morning after staying over: cook breakfast and talk with Daniel."""
    waittype("You get out of bed and start walking downstairs. Eerie paintings line the walls as you walk down. You enter the lab that you were in last night.")
    waittype("You see another door and walk through it, entering a kitchen area. You see a sign on the fridge that says: 'cook food pls'. You think the man is still sleeping upstairs, so you get some food from the fridge and start preparing breakfast.")
    waittype("You notice that it is still dark outside, even though you slept, but you don't think much of it. You eat the potatoes that you cooked and brew some coffee.")
    waittype("Daniel comes downstairs, looking tired and weary. He pours himself a cup of coffee and helps himself to the potatoes.")
    # Typo fix: missing space after the closing quote ('morning,"he').
    waittype('"Good morning," he says.')
    waittype('"You too," you say.')
    waittype('The man swallows and says, "Hmm. This is really good. How did you learn to make it?"\n\n"Um, I\'ve always liked to cook."')
    waittype('"So, uh, do you have any other relatives?" asked Daniel. "Like, before all this, if you know what I mean."')
    waittype('"Before all this? Before what?"')
    waittype('"Well, before the Cactypus Engine failed, of course!" Daniel exclaimed.')
    waittype('"What? Cactypus Engine? What? What are you talking about?", you ask.')
    # Typo fix: "recued" -> "rescued".
    waittype('Daniel raises his eyebrows at you. Then, he notices a ring on your pinkie with the letter C engraved on it. "Are you... were you one of the scientists that survived? Are you the person Pam rescued? Woah! I-I-I how! What? Wait, then you must know Pam, right?"')
    waittype('"What? Who\'s Pam? What are you talking about?"')
    waittype('Daniel pauses for a moment and takes a sip of coffee. "Ok. Let me tell you everything."')
    # The backstory continues in a separate function to keep this one short.
    goDownstairsContinued()
def goDownstairsContinued():
    """Daniel's backstory: the asteroid, the Cactypus Engine, and Operation Pukage."""
    clearConsole()
    # Typo fix: "thinked" -> "thought".
    waittype('"About 300 years ago, in January of 2051, astronomers and scientists at NASA saw a large asteroid speeding towards Earth. The asteroid was predicted to come close to Earth, but not come in contact with it." Daniel paused and thought.')
    waittype('"This was a long time ago, obviously before you or I were born. The disease of 2020 had long since ended, and everything seemed to be going perfectly. People were inventing new things, self-driving cars were mass-produced, everything was fine. Until it wasn\'t."')
    waittype('"When the asteroid came closer, scientists noticed that it was getting pulled toward Earth by gravity. Everyone panicked and it was like the pandemic all over again."')
    waittype('"It sped up too quickly and there was no way to stop it. Eventually, it hit right in the Middle East, damaging lots of Asia, Europe, and Africa. Large shockwaves went on for days, the Earth\'s population was almost halved, and it was knocked out of the Sun\'s gravitational pull, speeding out of the solar system."')
    waittype("Daniel took another sip of his coffee and made a disgusted face.")
    clearConsole()
    waittype('"Everyone was terrified. Millions of people died, but a very very smart group of scientists, mechanics, and computer programmers designed and made the Cactypus Engine, a 500,000,000 megawatt engine powered by the sun, the wind, and all of the shockwaves left over from the asteroid."')
    waittype('"The Cactypus Engine was finished in 6 months, and was immediately put to use. It created mass so that the large dent left in the Earth by the asteroid was filled in. Then, the Earth would have enough mass to be able to orbit around the sun properly."')
    waittype('"For decades, the Cactypus Engine kept the Earth in place and everyone lived happily. The population went back up, and the asteroid dent left in the Earth slowly became habitable. Everything seemed fine. Until it wasn\'t."')
    waittype('"Around 3 years ago, a group of former terrorists came together to create the Red Horse Gang, with the goal of causing nothing but chaos and destruction."')
    # Typo fix: "destory" -> "destroy".
    waittype('"At first, they just looted shops and scared old people. But slowly, they grew into bigger and bigger. One day, they came up with something really really evil. They would destroy the Cactypus Engine."')
    waittype("Daniel gets some cheese from the fridge and puts it into the microwave. He waits for a couple seconds, scoops the melted cheese out of the microwave with his hand, and slurps it up, leaving the excess cheese still inside the microwave.")
    clearConsole()
    # Typo fix: "destory" -> "destroy".
    waittype('Daniel continues. "They had a group of the most evil Red Horse People build an insane weapon to destroy the engine. All of the people working on it had a ring just like yours. When the explosion happened, many people died, but some lucky scientists survived but lost their memories."')
    waittype('"After the Cactypus Engine stopped working, the Earth flew away from the sun and is now just floating around in the solar system towards Jupiter, which is why it\'s always dark here."')
    waittype('"Many people died, either from the heat of the sun or the cold of the night every day. The world seemed like it would collapse. But one day, Pam and 3 others decided to try and hunt down the Red Horse Gang themselves. They got me to come, too. We called it Operation Pukage."')
    waittype('"We had tracked the Red Horse Gang\'s headquarters to here in Farside. All 10 of us moved into this building. We were up 24 hours a day, researching, looking through old abandoned libraries and things. Suddenly, all of our teammates except for Pam and I disappeared. We were really scared, because it meant that they were onto us. We were much more careful, wearing masks and staying away from people."')
    waittype("Daniel stood up and walked around the room.")
    clearConsole()
    # Typo fix: "went took the small plane" -> "took the small plane".
    waittype('"One night, Pam took the small plane that we had and flew to where the Cactypus Engine was. Miraculously, she found you there! She immediately brought you back to your old house, where you woke up yesterday. Right after she got back, she got a call from someone who said that they knew where our missing coworkers were. She called to tell me that she was going to find them. She left immediately."')
    # "your coffee" below is a deliberate gag (Daniel prefers the player's
    # brew to his own) — left unchanged.
    waittype("Daniel paused for a moment, thinking, and took a sip of your coffee, making a satisfied face instead of a disgusted one.")
    waittype('"When I went to do the daily looting, I went to your house to look for supplies, just like I do on a regular basis. Pretty soon we\'re gonna have to move out of the city since the things are running low. I know someone who\'s importing things from the sOuTh down by the river."')
    waittype("Anyway, I was searching your house when you woke up there. You followed me back, and here we are.")
    waittype('You sit there for a moment, stunned by everything he has just said. He takes another sip of your coffee.')
    options(['ask, "Where is Pam now?"', 'ask, "So...can I help?"'],[wherePam, iHelpPls])
def wherePam():
    """Ask Daniel where Pam is and offer to help find her."""
    # NOTE(review): "realizes" below should be "realize" (player voice) —
    # runtime string left unchanged here.
    waittype('"So...where is Pam?" you ask. Daniel pauses for a moment and sighs. "She...hasn\'t come back." Your eyes grow big in horror. "We...we must try and find her!" you exclaim.')
    waittype('"Yeah...I\'ve tried, but nothing not even a single clue. The only thing she said was that she was going to a factory of some sort."')
    waittype('You look at the clock and realizes that it is noon already. "Oh!" Daniel says. "I better get going." He gets up and goes into the lab room. He rummages around for something. Suddenly, you have an idea. "Hey! I could help you find Pam!"')
    waittype('Daniel perked up. "Woah! Why didn\'t I think of that! Of course you can help! Ok, ok, we\'ll get started right away." He seemed to have a big burst of enthusiasm. You smile to yourself. Or maybe it was just the coffee.')
    # Small real-time pause before the montage line.
    sleep(3)
    waittype("After taking a very short shower and changing clothes, you and Daniel go down to the lab to pick up some supplies.")
    waittype('"So," Daniel says. "I think we should try to explore the city today and see if we can find the factory with Pam, or see if we can find where the little trade route is."')
    options(['Try and find Pam', 'Try and find the trading route'],[letsfindpam, traderoute])
def letsfindpam():
    """Unwritten branch: go looking for Pam."""
    tempEnd()
def traderoute():
    """Unwritten branch: go looking for the trade route."""
    tempEnd()
def iHelpPls():
    """Unwritten branch: offer Daniel help."""
    tempEnd()
def searchWardrobe3():
    """Unwritten branch: open the bedroom wardrobe."""
    tempEnd()
def goThroughDoor():
    """Unwritten branch: take the bedroom's other door."""
    tempEnd()
def goToDesk():
    """Unwritten branch: inspect the bedroom desk."""
    tempEnd()
def lights2():
    """Unwritten branch: look for the bedroom lights."""
    tempEnd()
def openWindows():
    """Unwritten branch: open the bedroom windows."""
    tempEnd()
def noThanks2():
    """Unwritten branch: decline staying the night."""
    tempEnd()
def noThanks():
    """Unwritten branch: decline going inside with the man."""
    tempEnd()
def fightMan():
    """trying to fight the man"""
    # Enemy stat block for the combat system; keys are presumably consumed
    # by fight() alongside the player's global stats — TODO confirm against
    # fight()'s definition.
    fight(
        stats, {
            "health": 100,
            "maxDamage": 25,
            "critChance": 25,
            "critMulti": 2,
            "defense": 25,
            "escapeChance": 2,
        })
    tempEnd()  # placeholder: no post-fight scene yet
def breakWindow():
    """Look for something to smash the front window with."""
    for line in (
        "You walk over to the front of the building and duck under the window. You try to look for something to break the window with.",
        "You spot a small but sharp piece of loose brick on the road. You pick it up. It feels quite heavy.",
    ):
        waittype(line)
    choices = [
        "Try and use the rock to break the window",
        "Wait for the man to leave first.",
    ]
    options(choices, [breakWindow2, waitForMan])
def breakWindow2():
    """Smash the window — always fatal."""
    global lastWords
    # lastWords is presumably shown on the death screen; gotHurt(100)
    # drains all health — TODO confirm against gotHurt()'s definition.
    lastWords = "You stand up quickly and swing the rock at the window. It shatters, but broken glass shards shoot directly into your face, mouth, and eyes."
    gotHurt(100)
def waitForMan():
    """Wait for the man to leave before breaking in (scene not written yet)."""
    tempEnd()
def enterThroughDoor():
    """Going through the door to get in"""
    # NOTE(review): this narration string is truncated mid-sentence
    # ("...push open the door and ") — the scene was never finished.
    waittype(
        "You walk over to the door and pull on the handle. You push open the door and "
    )
def searchBoxes():
    """Search the boxes behind the building for a random item and some food."""
    waittype("You rummage through the boxes.")
    waittype("*rummage rummage rummage*")
    # Roll the loot: one (possibly absent) item and one food.
    randomItem = randint(1, 3)
    randomFood = randint(1, 3)
    if randomItem == 1:
        item = "nothing"
    elif randomItem == 2:
        item = "a small wristwatch"
    else:
        item = "a lighter"
    if randomFood == 1:
        food = "an apple"
    elif randomFood == 2:
        food = "a loaf of bread"
    else:
        food = "a large stick of butter"
    # BUG FIX: the item strings already carry their own article, so the old
    # "found a " + item printed "a a small wristwatch" / "a nothing", and
    # "nothing" was added to the inventory as if it were a real object.
    waittype("In the first box, you found " + item + ".")
    if item != "nothing":
        inv.add(item)
    waittype("In the second box, you found " + food +
             ". Would you like to eat it?")
    print("1. Eat")
    print("2. Don't eat.")
    choice = input()
    print(showStats())
    # BUG FIX: the food used to be eaten for BOTH "1" and "2";
    # only eat when the player chose "1. Eat".
    if choice == "1":
        if food == "an apple":
            gotHungry(-20)
            amountGained = "20"
        elif food == "a loaf of bread":
            gotHungry(-30)
            amountGained = "30"
        elif food == "a large stick of butter":
            # BUG FIX: the message said 40 but only 30 was restored;
            # restore 40 to match the 20/30/40 progression.
            gotHungry(-40)
            amountGained = "40"
        waittype("You ate " + food + " and replenished " + amountGained +
                 " hunger points.")
    options(
        [
            "Try and go through the trapdoor using the boxes",
            "Try and go through the trapdoor using the generator",
            "Try to break the window",
            "Try to go through the front door",
            "Explore the city",
            "Try and find another way in",
        ],
        [
            getIntoTrapdoorWithBoxes,
            getIntoTrapdoorWithGenerator,
            breakWindow,
            enterThroughDoor,
            explore,
            findAnotherWay,
        ],
    )
def findAnotherWay():
    """Trying to find another way into the building"""
    tempEnd()  # placeholder: scene not written yet
def hide4():
    """Hiding in the front of the building where the windows are when the man comes out to fix the generator"""
    waittype(
        "The man walks out the door just as you get out of sight. You hear metal banging, and see sparks."
    )
    global generatorFixed
    # The repair succeeds at random (coin flip).
    generatorFixed = bool(randint(0, 1))
    # BUG FIX: the branches were swapped — the "grunts in rage" (failure)
    # text played when generatorFixed was True, and the "hum returns"
    # (success) text when it was False.
    if not generatorFixed:
        waittype(
            "The man grunts heavily, and stomps his foot in rage. Returns inside, slamming the door."
        )
        # NOTE(review): options() is called without a handler list here;
        # tempEnd() below ends the demo either way.
        options(["Wait", "Go towards the generator", "Get in the building"])
    else:
        waittype(
            "The hum returns, and you see the lights in the building come back on."
        )
        waittype(
            "Suddenly, you see the generator light up a brilliant blue. You see sparks spark out of it."
        )
        waittype(
            "Frozen in awe, you stay still, until you see something moving. Suddenly, a huge blue spider jumps out from behind the generator. You are in shock."
        )
        options(["Fight the spider", "Run", "Yell for help"])
    tempEnd()
def confrontTheMan():
    """confronting the man when he comes out to fix the generator"""
    tempEnd()  # placeholder: scene not written yet
def explore():
    """exploring the city"""
    waittype("You decide to ignore the man and explore the city.")
    waittype(
        "You come to a fork in the road. The path to the left leads to a lighthouse while a bank and a restaurant are towards your right. The path in front of you leads to the building the man went in."
    )
    # BUG FIX: this paragraph used to be printed twice in near-identical
    # wording; the duplicate copy has been removed.
    waittype(
        "The building where the man went is small but has multiple floors. The lighthouse and the house you came from are quite far away. You see a light turn on in the man's building."
    )
    options(
        [
            "Go into the building with the man",
            "Go to the lighthouse",
            "Turn back and return to your house",
            "Go to the restaurant",
            "Go to the bank",
        ],
        [follow3, lighthouse, backToHouse, restaurant, bank],
    )
def backToHouse():
    """Going back home for some reason"""
    tempEnd()  # placeholder: scene not written yet
def restaurant():
    """going to the restaurant"""
    waittype(
        "You decide to take the right fork and head towards the restaurant.")
    waittype(
        "You hear your footsteps rapping against the dirt path. You see the restaurant and the bank in the distance. You reach a road and hear your feet hit the tarmac."
    )
    waittype(
        "You keep walking along the road, your feet barely making any sound. As you near the parking lot, you notice that the windows of both buildings are smashed and glass shards litter the ground."
    )
    waittype(
        "You step inside the parking lot, which stretches around the entire building. But all of a sudden, you hear voices coming from the back of the restaurant.")
    # BUG FIX: a missing comma fused "Enter the restaurant" and
    # "Turn back and run" into one label, leaving 3 labels for 4 handlers.
    options([
        "Investigate the noise", "Hide", "Enter the restaurant",
        "Turn back and run"
    ],
            [
                investigateNoise, hideNearRestaurant, enterRestaurant,
                runFromRestaurant
            ])
def investigateNoise():
    """Investigating noise coming from the back of the restaurant"""
    waittype(
        "You sneak towards the back of the restaurant, dodging the glass pieces being illuminated by the bright moon. You poke your head around the corner and see..."
    )
    tempEnd()  # cliffhanger: scene not written yet
def hideNearRestaurant():
    """Scout the area around the restaurant for a hiding spot."""
    for line in (
        "You scan the area for hiding places. You see some thorny bushes near the parking lot where you are. The bushes are thick with green leaves and thorns. It might be able to conceal you.",
        "You notice you can't see the restaurant interior because its pitch-dark and no lights are turned on. It would be a good place to hide, as long as nobody enters the restaurant.",
        "The voices amplify in sound.",
    ):
        waittype(line)
    # Four choices, mapped positionally to four handlers.
    options(
        [
            "Hide in the bushes", "Stay where you are", "Enter the restaurant",
            "Turn back and run"
        ],
        [hideInBushes, stayAtRestaurant, enterRestaurant, runFromRestaurant],
    )
def hideInBushes():
    """Hiding in some bushes near the restaurant"""
    waittype(
        "You sneak towards a cluster of green bushes, making sure the person or people at the back of building can't see you."
    )
    waittype(
        "You drop to your knees and bury yourself in the bushes. The sharp thorns scratch your skin. You wince in pain."
    )
    # The thorns cost a little health.
    gotHurt(2)
    # NOTE(review): waittype is called with SIX arguments here, while it
    # takes a single string everywhere else in this section — confirm that
    # waittype accepts *args, otherwise this raises TypeError.
    waittype(
        "You try to see through the green thorns and bushes. You can see the outline of a person, who seems to be the leader, speaking to a large group of people.",
        "You can barely hear them, but you catch a phrase every now and then.",
        "...yeah, that's true...",
        "...the Earth...",
        "...don't know why...",
        "...Red Horse Gang...",
    )
    tempEnd()  # placeholder: scene not written yet
def runFromRestaurant():
    """Running away from restaurant"""
    # 1-in-3 chance of slipping on the way out; otherwise the demo ends.
    if (randint(1, 3) == 1):
        waittype(
            "You turn around and run as fast as your legs could carry you. You reach the dirt path and slip because you ran too fast. You scrape your knee."
        )
        # Assumes gotHurt returns the damage dealt — TODO confirm; also
        # NOTE(review): "earier" in the string below is a typo for "earlier".
        hurtAmount = gotHurt(randint(1, 5))
        if hurtAmount < 5:
            # NOTE(review): two-argument waittype call — confirm *args support.
            waittype(
                "You look at your knee. You see a faint white line where you fell. You get up and look behind you, nothing in sight.",
                "You walk at a fast pace, looking at the lighthouse in the distance. You reach the same fork in the road that you saw earier."
            )
        else:
            waittype(
                "You examine your knee, which is pink and bleeding. "
            )
    else:
        tempEnd()
    #options([follow3, lighthouse, backToHouse, restaurant, bank]) #uh
def stayAtRestaurant():
    """Stay put near the restaurant (scene not written yet)."""
    waittype("")  # placeholder narration
def enterRestaurant():
    """Entering the restaurant"""
    # NOTE(review): the scene stops after this line; no options() follow yet.
    waittype(
        "You head towards the door of the restaurant, situated near the left side of the building."
    )
def lighthouse():
    """going to explore the lighthouse"""
    waittype("You start to walk over to the lighthouse.")
    waittype("*walk walk walk*")
    waittype(
        "You get closer to the lighthouse and notice some other buildings. One has a staircase that leads to the second floor. You also notice a dock, with sailboats, jetskis, and other big boats."
    )
    # Typo fixes: "compeletely" -> "completely", "crumbing" -> "crumbling".
    waittype(
        "You keep walking and notice that you have entered a small neighbourhood. The gate and fence around it has been completely demolished."
    )
    waittype(
        "You see large apartment buildings in the distance. The windows are shattered and the walls are crumbling. You see no sign of life."
    )
    waittype(
        "You notice that the sky is brighter here. You can see the lighthouse clearly. You notice that it is a one-way road and that it is paved with concrete instead of bricks."
    )
    # Eight destinations, mapped positionally to eight handlers.
    options(
        [
            "Go to the lighthouse",
            "Go to the dock",
            "Go to the building with the outdoor staircase",
            "Go to the apartments",
            "Go to the bank",
            "Go to the restaurant",
            "Go to the building with the man",
            "Go back to your house",
        ],
        [
            lighthouse2,
            dock,
            staircaseBuilding,
            apartments,
            bank,
            restaurant,
            follow3,
            backToHouse,
        ],
    )
def lighthouse2():
    """actually going to the lighthouse"""
    waittype("")  # placeholder narration: scene not written yet
def dock():
    """going to the dock"""
    tempEnd()  # placeholder: scene not written yet
def staircaseBuilding():
    """Going to the staircaseBuilding"""
    tempEnd()  # placeholder: scene not written yet
def apartments():
    """going to the apartments"""
    tempEnd()  # placeholder: scene not written yet
def bank():
    """going to the bank"""
    tempEnd()  # placeholder: scene not written yet
def leavingWithoutFollowingMan():
    """leaving the house without following the man"""
    # Punctuation fix: "calm You shiver" -> "calm. You shiver".
    waittype(
        "The air outside is cold, but calm. You shiver and move the blanket draped around your neck to cover as much of your body as possible."
    )
    waittype(
        "You look around, but you can barely see anything. The only light sources come from the moon, hovering in the sky, and the stars, scattered across the night sky."
    )
    waittype(
        "You start to take a small walk around the city. You notice that everything is eerily clean."
    )
    # Punctuation fix: added the missing terminal period.
    waittype("Suddenly, you hear a noise.")
| 34.082891
| 415
| 0.6665
|
0b47a25c7eb63529d60273ab2609fc99064953fc
| 2,928
|
py
|
Python
|
alipay/aop/api/domain/AlipayFundTransOrderCloseModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayFundTransOrderCloseModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayFundTransOrderCloseModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundTransOrderCloseModel(object):
    """Model for an Alipay fund-transfer order close request.

    A plain property bag over five optional fields, with helpers to convert
    to and from the dict shape the Alipay gateway exchanges.
    """

    def __init__(self):
        self._biz_scene = None
        self._close_reason = None
        self._order_id = None
        self._out_biz_no = None
        self._product_code = None

    @property
    def biz_scene(self):
        return self._biz_scene

    @biz_scene.setter
    def biz_scene(self, value):
        self._biz_scene = value

    @property
    def close_reason(self):
        return self._close_reason

    @close_reason.setter
    def close_reason(self, value):
        self._close_reason = value

    @property
    def order_id(self):
        return self._order_id

    @order_id.setter
    def order_id(self, value):
        self._order_id = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def product_code(self):
        return self._product_code

    @product_code.setter
    def product_code(self, value):
        self._product_code = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict; nested models recurse."""
        params = dict()
        for name in ('biz_scene', 'close_reason', 'order_id', 'out_biz_no', 'product_code'):
            value = getattr(self, name)
            # Falsy fields are omitted, matching the generated SDK behavior.
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayFundTransOrderCloseModel()
        for name in ('biz_scene', 'close_reason', 'order_id', 'out_biz_no', 'product_code'):
            if name in d:
                setattr(o, name, d[name])
        return o
| 28.990099
| 75
| 0.592213
|
a334f3a02d2bc2eb5c69c1a42bf6f0931b0ce97f
| 309
|
py
|
Python
|
337-Combination-Sum-IV/solution.py
|
alfmunny/leetcode
|
e35d2164c7e6e66410309fe1667ceab5a7689bef
|
[
"MIT"
] | null | null | null |
337-Combination-Sum-IV/solution.py
|
alfmunny/leetcode
|
e35d2164c7e6e66410309fe1667ceab5a7689bef
|
[
"MIT"
] | null | null | null |
337-Combination-Sum-IV/solution.py
|
alfmunny/leetcode
|
e35d2164c7e6e66410309fe1667ceab5a7689bef
|
[
"MIT"
] | null | null | null |
class Solution:
    def combinationSum4(self, nums, target):
        """Count ordered sequences drawn (with repetition) from nums that sum to target.

        Classic 1-D DP: ways[t] = sum of ways[t - n] over every n in nums
        that fits, seeded with ways[0] = 1 (the empty combination).
        """
        ways = [1] + [0] * target
        for total in range(1, target + 1):
            ways[total] = sum(ways[total - n] for n in nums if n <= total)
        return ways[target]
| 23.769231
| 44
| 0.372168
|
e42814bf50a1c03b22c45861b9531d6b3f50f65a
| 576
|
py
|
Python
|
src/shooopie/store/migrations/0003_siteconfig.py
|
martdi/shooopie
|
50fe0151b35fe8f628893e7d11a82a6b4072ec03
|
[
"MIT"
] | null | null | null |
src/shooopie/store/migrations/0003_siteconfig.py
|
martdi/shooopie
|
50fe0151b35fe8f628893e7d11a82a6b4072ec03
|
[
"MIT"
] | 1
|
2022-01-19T19:44:59.000Z
|
2022-01-19T19:50:22.000Z
|
src/shooopie/store/migrations/0003_siteconfig.py
|
martdi/shooopie
|
50fe0151b35fe8f628893e7d11a82a6b4072ec03
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-10-14 00:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the SiteConfig model (site-wide title/tagline storage)."""
    # Must apply after the store app's initial data migration.
    dependencies = [
        ('store', '0002_inital_data'),
    ]
    operations = [
        migrations.CreateModel(
            name='SiteConfig',
            fields=[
                # Default BigAutoField primary key plus two short text fields.
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('tagline', models.CharField(max_length=255)),
            ],
        ),
    ]
| 26.181818
| 117
| 0.576389
|
c58ad26112d896ede6cb2e0550d1ea74ada041b6
| 619
|
py
|
Python
|
homeassistant/components/nest/sensor.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/nest/sensor.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 79
|
2020-07-23T07:13:37.000Z
|
2022-03-22T06:02:37.000Z
|
homeassistant/components/nest/sensor.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 6
|
2018-02-04T03:48:55.000Z
|
2022-01-24T20:37:04.000Z
|
"""Support for Nest sensors that dispatches between API versions."""
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DATA_SDM
from .legacy.sensor import async_setup_legacy_entry
from .sensor_sdm import async_setup_sdm_entry
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the sensors, dispatching between the SDM and legacy APIs."""
    # Entries carrying the SDM marker use the new API; all others the legacy one.
    if DATA_SDM in entry.data:
        await async_setup_sdm_entry(hass, entry, async_add_entities)
    else:
        await async_setup_legacy_entry(hass, entry, async_add_entities)
| 30.95
| 71
| 0.783522
|
33c7511b2659a3a57e7e275265ce5a3894055adb
| 2,514
|
py
|
Python
|
examples/dagster_examples_tests/airline_demo_tests/test_airflow.py
|
flowersw/dagster
|
0de6baf2bd6a41bfacf0be532b954e23305fb6b4
|
[
"Apache-2.0"
] | 3
|
2020-09-09T04:10:23.000Z
|
2021-11-08T02:10:42.000Z
|
examples/dagster_examples_tests/airline_demo_tests/test_airflow.py
|
flowersw/dagster
|
0de6baf2bd6a41bfacf0be532b954e23305fb6b4
|
[
"Apache-2.0"
] | 2
|
2021-05-11T13:36:27.000Z
|
2021-09-03T01:53:11.000Z
|
examples/dagster_examples_tests/airline_demo_tests/test_airflow.py
|
flowersw/dagster
|
0de6baf2bd6a41bfacf0be532b954e23305fb6b4
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=redefined-outer-name
import os
import pytest
from dagster_examples.airline_demo.pipelines import ( # pylint: disable=unused-import
define_airline_demo_ingest_pipeline,
define_airline_demo_warehouse_pipeline,
)
from dagster import ExecutionTargetHandle
from dagster.utils import script_relative_path
try:
# pylint: disable=unused-import
from dagster_airflow.test_fixtures import dagster_airflow_python_operator_pipeline
except ImportError:
pass
@pytest.mark.slow
@pytest.mark.airflow
class TestAirflowPython_0IngestExecution(object):
    """Runs the airline-demo ingest pipeline through the Airflow
    python-operator fixture (see dagster_airflow.test_fixtures)."""

    handle = ExecutionTargetHandle.for_pipeline_fn(define_airline_demo_ingest_pipeline)
    pipeline_name = 'airline_demo_ingest_pipeline'
    # Environment config layered in order: base, storage, then ingest overrides.
    environment_yaml = [
        script_relative_path(
            os.path.join(
                '..', '..', 'dagster_examples', 'airline_demo', 'environments', fname
            )
        )
        for fname in ('local_base.yaml', 's3_storage.yaml', 'local_fast_ingest.yaml')
    ]
    mode = 'local'

    def test_airflow_run_ingest_pipeline(self, dagster_airflow_python_operator_pipeline):
        # The fixture performs the pipeline run; reaching here means it succeeded.
        pass
@pytest.mark.slow
@pytest.mark.airflow
class TestAirflowPython_1WarehouseExecution(object):
    """Runs the airline-demo warehouse pipeline through the Airflow
    python-operator fixture (see dagster_airflow.test_fixtures)."""

    handle = ExecutionTargetHandle.for_pipeline_fn(define_airline_demo_warehouse_pipeline)
    pipeline_name = 'airline_demo_warehouse_pipeline'
    # Environment config layered in order: base, storage, then warehouse overrides.
    environment_yaml = [
        script_relative_path(
            os.path.join(
                '..', '..', 'dagster_examples', 'airline_demo', 'environments', fname
            )
        )
        for fname in ('local_base.yaml', 's3_storage.yaml', 'local_warehouse.yaml')
    ]
    mode = 'local'

    def test_airflow_run_warehouse_pipeline(self, dagster_airflow_python_operator_pipeline):
        # The fixture performs the pipeline run; reaching here means it succeeded.
        pass
| 29.928571
| 97
| 0.610581
|
89fca28f5f46c13fb35bd69126752b22de8347b6
| 19,857
|
py
|
Python
|
xsamtools/cram.py
|
xbrianh/xsamtools
|
12a44d3183a7ffa603e630a96580d3c0d5eec1a7
|
[
"MIT"
] | 1
|
2020-06-09T17:02:00.000Z
|
2020-06-09T17:02:00.000Z
|
xsamtools/cram.py
|
DataBiosphere/xsamtools
|
08fdc7309645f949e07e2f17a2d111863da82ba4
|
[
"MIT"
] | 9
|
2020-08-13T23:21:25.000Z
|
2021-01-08T20:59:22.000Z
|
xsamtools/cram.py
|
xbrianh/xsamtools
|
12a44d3183a7ffa603e630a96580d3c0d5eec1a7
|
[
"MIT"
] | null | null | null |
"""
Tools for working with CRAM files.
CRAM/CRAI spec here:
http://samtools.github.io/hts-specs/CRAMv3.pdf
"""
import os
import datetime
import logging
import gzip
import io
from collections import namedtuple
from tempfile import TemporaryDirectory
from typing import Optional, Dict, Any, Union
from urllib.request import urlretrieve
from terra_notebook_utils import xprofile
from xsamtools import gs_utils
from xsamtools.utils import run
# One parsed line of a .crai index; all six fields are integers
# (see get_crai_indices below).
CramLocation = namedtuple("CramLocation", "chr alignment_start alignment_span offset slice_offset slice_size")
# Module-level logger for the cram helpers.
log = logging.getLogger(__name__)
def read_fixed_length_cram_file_definition(fh: io.BytesIO) -> Dict[str, Union[int, str]]:
    """Parse the fixed 26-byte file definition at the start of a CRAM file.

    Layout (CRAM spec 3.0, 22 Jun 2020):
        byte[4]        magic number, always "CRAM"
        unsigned byte  major format number (e.g. 3)
        unsigned byte  minor format number (e.g. 0)
        byte[20]       file id (e.g. file name or SHA1 checksum)

    Valid major.minor versions: 1.0 (original release), 2.0, 2.1 (adds EOF
    markers), 3.0 (adds compression methods and checksums).
    """
    # The reads below are sequential and must stay in spec order.
    magic = fh.read(4).decode('utf-8')
    major = decode_int8(fh)
    minor = decode_int8(fh)
    file_id = fh.read(20).decode('utf-8')
    return {
        'cram': magic,
        'major_version': major,
        'minor_version': minor,
        'file_id': file_id,
    }
def read_cram_container_header(fh: io.BytesIO) -> Dict[str, Any]:
    """Parse one CRAM container header from an open byte stream.

    Per the CRAM 3.0 spec the fields are, in order:
        int32         length        byte length of all blocks in the container
        ITF-8         reference sequence id (-1 unmapped, -2 multi-reference)
        ITF-8         reference start position (0 if multi-ref/unmapped)
        ITF-8         alignment span (0 if multi-ref/unmapped)
        ITF-8         number of records
        LTF-8         record counter (1-based sequential index in the stream)
        LTF-8         bases (number of read bases)
        ITF-8         number of blocks
        Array<ITF-8>  landmarks (slice byte offsets from end of this header;
                      landmarks[0] equals the compression-header byte length)
        int32         crc32 of all preceding container bytes
    """
    # Sequential reads: field order must not change.
    length = decode_int32(fh)
    reference_sequence_id = decode_itf8(fh)
    starting_position = decode_itf8(fh)
    alignment_span = decode_itf8(fh)
    number_of_records = decode_itf8(fh)
    record_counter = decode_ltf8(fh)
    bases = decode_ltf8(fh)
    number_of_blocks = decode_itf8(fh)
    landmark = decode_itf8_array(fh)
    crc_hash = fh.read(4)
    return {
        "length": length,
        "reference_sequence_id": reference_sequence_id,
        "starting_position": starting_position,
        "alignment_span": alignment_span,
        "number_of_records": number_of_records,
        "record_counter": record_counter,
        "bases": bases,
        "number_of_blocks": number_of_blocks,
        "landmark": landmark,
        "crc_hash": crc_hash,
    }
def decode_int32(fh: io.BytesIO) -> int:
    """Read a CRAM int32: 4 bytes, little-endian, two's-complement signed."""
    raw = fh.read(4)
    return int.from_bytes(raw, byteorder='little', signed=True)
def decode_int8(fh: io.BytesIO) -> int:
    """Read one byte as an unsigned integer (returns 0 at EOF).

    The CRAM spec gives this type no special name (unlike ITF-8/LTF-8); it
    appears directly only in the file definition, and serves as the building
    block for the variable-length ITF-8 and LTF-8 decoders below.
    """
    raw = fh.read(1)
    return int.from_bytes(raw, byteorder='little', signed=False)
def decode_itf8(fh: io.BytesIO) -> int:
    """Decode one ITF-8 value (integer transformation format, 8-bit).

    ITF-8 stores a 32-bit unsigned integer in 1-5 bytes; the count of leading
    1-bits in the first byte gives the number of extra payload bytes.  In the
    5-byte form only the low 4 bits of the final byte are used, and the shift
    pattern changes (20/12/4 instead of multiples of 8).
    Mirrors htsjdk:
    https://github.com/samtools/htsjdk/blob/b24c9521958514c43a121651d1fdb2cdeb77cc0b/src/main/java/htsjdk/samtools/cram/io/ITF8.java#L12  # noqa
    """
    def _u8() -> int:
        # One raw unsigned byte (0 at EOF).
        return int.from_bytes(fh.read(1), byteorder='little', signed=False)

    first = _u8()
    if first & 0x80 == 0:                       # 0xxxxxxx: 7-bit value
        return first
    if first & 0x40 == 0:                       # 10xxxxxx + 1 byte
        return ((first & 0x7F) << 8) | _u8()
    if first & 0x20 == 0:                       # 110xxxxx + 2 bytes
        b2, b3 = _u8(), _u8()
        return ((first & 0x3F) << 16) | (b2 << 8) | b3
    if first & 0x10 == 0:                       # 1110xxxx + 3 bytes
        b2, b3, b4 = _u8(), _u8(), _u8()
        return ((first & 0x1F) << 24) | (b2 << 16) | (b3 << 8) | b4
    # 1111xxxx + 4 bytes; note the 4-bit shifts and the low-nibble final byte.
    b2, b3, b4, b5 = _u8(), _u8(), _u8(), _u8()
    return ((first & 0x0F) << 28) | (b2 << 20) | (b3 << 12) | (b4 << 4) | (b5 & 0x0F)
def encode_itf8(num: int) -> bytes:
    """Encode an unsigned 32-bit integer with CRAM's ITF-8 protocol.

    ITF-8 stores the value in 1-5 bytes; each leading 1-bit of the first byte
    signals one extra payload byte.  In the 5-byte form the shift pattern
    changes (28/20/12/4) and only the low 4 bits of the last byte are
    significant.
    Mirrors htsjdk:
    https://github.com/samtools/htsjdk/blob/b24c9521958514c43a121651d1fdb2cdeb77cc0b/src/main/java/htsjdk/samtools/cram/io/ITF8.java#L12  # noqa

    Args:
        num: value in the range [0, 2**32).

    Returns:
        The 1-5 byte ITF-8 encoding of `num`.

    Raises:
        ValueError: if num is negative or does not fit in 32 bits.
    """
    # Bug fix: negative input previously fell through to bytes([negative]),
    # raising an opaque ValueError from bytes(); reject it explicitly.
    if num < 0:
        raise ValueError('ITF-8 cannot encode negative numbers.')
    if num < 2 ** 7:
        integers = [num]
    elif num < 2 ** 14:
        integers = [((num >> 8) | 0x80), (num & 0xFF)]
    elif num < 2 ** 21:
        integers = [((num >> 16) | 0xC0), ((num >> 8) & 0xFF), (num & 0xFF)]
    elif num < 2 ** 28:
        integers = [((num >> 24) | 0xE0), ((num >> 16) & 0xFF), ((num >> 8) & 0xFF), (num & 0xFF)]
    elif num < 2 ** 32:
        integers = [((num >> 28) | 0xF0), ((num >> 20) & 0xFF), ((num >> 12) & 0xFF), ((num >> 4) & 0xFF), (num & 0xFF)]
    else:
        raise ValueError('Number is too large for an unsigned 32-bit integer.')
    return bytes(integers)
def decode_ltf8(fh: io.BytesIO) -> int:
    """Decode one LTF-8 value (Long Transformation Format - 8 bit).

    LTF-8 is the 64-bit cousin of ITF-8: it stores an unsigned long in 1-9
    bytes, with each leading 1-bit of the first byte signalling one extra
    payload byte.  Unlike ITF-8 there is no 4-bit truncation — extra bytes
    always shift in whole.  For the 8- and 9-byte forms (first byte 0xFE /
    0xFF) the first byte carries no value bits at all.
    Mirrors htsjdk:
    https://github.com/samtools/htsjdk/blob/b24c9521958514c43a121651d1fdb2cdeb77cc0b/src/main/java/htsjdk/samtools/cram/io/LTF8.java  # noqa
    """
    def _u8() -> int:
        # One raw unsigned byte (0 at EOF).
        return int.from_bytes(fh.read(1), byteorder='little', signed=False)

    def _tail(count: int) -> int:
        # Read `count` payload bytes, big-endian.
        value = 0
        for _ in range(count):
            value = (value << 8) | _u8()
        return value

    first = _u8()
    if first & 0x80 == 0:                       # 0xxxxxxx: 7-bit value
        return first
    if first & 0x40 == 0:                       # 10xxxxxx + 1 byte
        return (first & 0x7F) << 8 | _u8()
    if first & 0x20 == 0:                       # 110xxxxx + 2 bytes
        return (first & 0x3F) << 16 | _tail(2)
    if first & 0x10 == 0:                       # 1110xxxx + 3 bytes
        return (first & 0x1F) << 24 | _tail(3)
    if first & 0x08 == 0:                       # 11110xxx + 4 bytes (no ITF-8-style truncation)
        return (first & 0x0F) << 32 | _tail(4)
    if first & 0x04 == 0:                       # 111110xx + 5 bytes (beyond ITF-8's range)
        return (first & 0x07) << 40 | _tail(5)
    if first & 0x02 == 0:                       # 1111110x + 6 bytes
        return (first & 0x03) << 48 | _tail(6)
    if first & 0x01 == 0:                       # 11111110 + 7 bytes; first byte carries no bits
        return _tail(7)
    return _tail(8)                             # 11111111 + 8 bytes; first byte carries no bits
def encode_ltf8(num: int) -> bytes:
    """Encode an unsigned 64-bit integer with CRAM's LTF-8 protocol.

    LTF-8 is the long (64-bit) version of ITF-8: the value is stored in 1-9
    bytes, each leading 1-bit of the first byte signalling one extra payload
    byte.  The 8- and 9-byte forms use the constant first bytes 0xFE / 0xFF.
    Mirrors htsjdk:
    https://github.com/samtools/htsjdk/blob/b24c9521958514c43a121651d1fdb2cdeb77cc0b/src/main/java/htsjdk/samtools/cram/io/LTF8.java  # noqa

    Args:
        num: value in the range [0, 2**64).

    Returns:
        The 1-9 byte LTF-8 encoding of `num`.

    Raises:
        ValueError: if num is negative or does not fit in 64 bits.
    """
    # Bug fix: for negative input, num >> 64 is -1, so the original fell
    # through to the misleading "too large" error; reject negatives clearly.
    if num < 0:
        raise ValueError(f'LTF-8 cannot encode negative numbers: {num}')
    if num >> 7 == 0:
        integers = [num]
    elif num >> 14 == 0:
        integers = [((num >> 8) | 0x80), num & 0xFF]
    elif num >> 21 == 0:
        integers = [((num >> 16) | 0xC0), (num >> 8) & 0xFF, num & 0xFF]
    elif num >> 28 == 0:
        integers = [((num >> 24) | 0xE0), (num >> 16) & 0xFF, (num >> 8) & 0xFF, num & 0xFF]
    elif num >> 35 == 0:
        # differs from itf8; doesn't truncate 4 bytes
        integers = [((num >> 32) | 0xF0), (num >> 24) & 0xFF, (num >> 16) & 0xFF, (num >> 8) & 0xFF, num & 0xFF]
    elif num >> 42 == 0:
        # this is where the number gets too big for itf8
        integers = [((num >> 40) | 0xF8), (num >> 32) & 0xFF, (num >> 24) & 0xFF, (num >> 16) & 0xFF,
                    (num >> 8) & 0xFF, num & 0xFF]
    elif num >> 49 == 0:
        integers = [((num >> 48) | 0xFC), (num >> 40) & 0xFF, (num >> 32) & 0xFF, (num >> 24) & 0xFF,
                    (num >> 16) & 0xFF, (num >> 8) & 0xFF, num & 0xFF]
    elif num >> 56 == 0:
        # note the first byte here is constant
        integers = [0xFE, (num >> 48) & 0xFF, (num >> 40) & 0xFF, (num >> 32) & 0xFF, (num >> 24) & 0xFF,
                    (num >> 16) & 0xFF, (num >> 8) & 0xFF, num & 0xFF]
    elif num >> 64 == 0:
        # note the first byte here is constant
        integers = [0xFF, (num >> 56) & 0xFF, (num >> 48) & 0xFF, (num >> 40) & 0xFF, (num >> 32) & 0xFF,
                    (num >> 24) & 0xFF, (num >> 16) & 0xFF, (num >> 8) & 0xFF, num & 0xFF]
    else:
        raise ValueError(f'Number is too large for an unsigned 64-bit integer: {num}')
    return bytes(integers)
def decode_itf8_array(handle: io.BytesIO, size: Optional[int] = None) -> list:
    """Decode an array of ITF-8 values from a byte stream.

    When `size` is None the element count is read from the stream first as an
    ITF-8 value; otherwise the caller supplies the count (some arrays have a
    length fixed by the CRAM spec itself, e.g. Array[4]).
    """
    count = decode_itf8(handle) if size is None else size
    return [decode_itf8(handle) for _ in range(count)]
def get_crai_indices(crai):
    """Parse a gzipped .crai index file into a list of CramLocation tuples.

    Each line carries six tab-separated integers: chr, alignment_start,
    alignment_span, offset, slice_offset, slice_size.
    """
    with gzip.open(crai, "rt", encoding="ascii") as reader:
        return [CramLocation(*map(int, line.split("\t"))) for line in reader]
def download_full_gs(gs_path: str, output_filename: Optional[str] = None) -> str:
    """Download an entire gs:// object to a local file and return its path.

    Args:
        gs_path: Full "gs://bucket/key" URL of the object.
        output_filename: Destination path; defaults to the key's basename
            resolved against the current working directory.

    Returns:
        The path the object was written to.
    """
    # TODO: use gs_chunked_io instead
    # NOTE(review): bucket_name is unpacked but never used; the blob is
    # resolved from the full URL below.
    bucket_name, key_name = gs_path[len('gs://'):].split('/', 1)
    output_filename = output_filename if output_filename else os.path.abspath(os.path.basename(key_name))
    blob = gs_utils._blob_for_url(gs_path)
    blob.download_to_filename(output_filename)
    log.debug(f'Entire file "{gs_path}" downloaded to: {output_filename}')
    return output_filename
def format_and_check_cram(cram: str) -> str:
    """Normalize a local CRAM path (optionally 'file://'-prefixed) to an
    absolute path, verifying that the file exists.

    Raises:
        NotImplementedError: if the path uses any other URI scheme.
        FileNotFoundError: if the file does not exist.
    """
    if cram.startswith('file://'):
        cram = cram[len('file://'):]
    if ':' in cram:
        raise NotImplementedError(f'Unsupported schema: {cram}')
    cram = os.path.abspath(cram)
    # Bug fix: `assert` is stripped under `python -O`; raise explicitly so the
    # existence check always runs.
    if not os.path.exists(cram):
        raise FileNotFoundError(cram)
    return cram
def write_final_file_with_samtools(cram: str,
                                   crai: Optional[str],
                                   regions: Optional[str],
                                   cram_format: bool,
                                   output: str) -> None:
    """Run `samtools view` over a local CRAM and write the result to `output`.

    Args:
        cram: Path to the local input CRAM file.
        crai: Optional path to the CRAM index (passed via -X); without it
            samtools may scan the whole file.
        regions: Optional comma-separated region list to subset.
        cram_format: Emit CRAM (-C) when True, otherwise SAM.
        output: Path of the file to create.
    """
    region_args = ' '.join(regions.split(',')) if regions else ''
    cram_format_arg = '-C' if cram_format else ''
    if crai:
        crai_arg = f'-X {crai}'
    else:
        log.warning('No crai file present, this may take a while.')
        crai_arg = ''
    # we can get away with a simple split on spaces here because there's nothing complicated going on
    cmd = f'samtools view {cram_format_arg} {cram} {crai_arg} {region_args}'.split()
    log.info(f'Now running: {cmd}')
    # Bug fix: use a context manager so the output handle is always closed
    # (the original leaked the file object returned by open()).
    with open(output, 'w') as out:
        run(cmd, stdout=out, check=True)
    log.debug(f'Output CRAM successfully generated at: {output}')
def stage(uri: str, output: str) -> None:
    """
    Make a file available locally for samtools to use.

    This also allows the file to be placed in the same folder as associated
    files, like cram and crai, which samtools can be picky about.
    """
    if uri.startswith('gs://'):
        download_full_gs(uri, output_filename=output)
        return
    if uri.startswith(('http://', 'https://')):
        urlretrieve(uri, output)
        return
    # Remaining cases are local paths, with or without a 'file://' prefix.
    if uri.startswith('file://'):
        local_path = uri[len('file://'):]
    elif ':' not in uri:
        local_path = uri
    else:
        raise NotImplementedError(f'Unsupported format: {uri}')
    # Hard-link into place unless source and destination are the same file.
    if os.path.abspath(local_path) != os.path.abspath(output):
        os.link(local_path, output)
def timestamped_filename(cram_format: bool) -> str:
    """Return an absolute '<timestamp>.output.cram' (or '.sam') path in cwd."""
    extension = 'cram' if cram_format else 'sam'
    stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
    return os.path.abspath('.'.join((stamp, 'output', extension)))
@xprofile.profile("xsamtools cram view")
def view(cram: str,
         crai: Optional[str],
         regions: Optional[str],
         output: Optional[str] = None,
         cram_format: bool = True) -> str:
    """Subset/convert a CRAM with samtools and write the result locally.

    Stages the (possibly remote) cram/crai side by side in a temporary
    directory so samtools sees plain local files, then shells out to
    `samtools view`.

    Args:
        cram: Input CRAM; gs://, file://, http(s)://, or a plain local path.
        crai: Optional CRAM index; same schemes accepted.
        regions: Optional comma-separated region list to subset.
        output: Output path (local only); defaults to a timestamped filename.
        cram_format: Emit CRAM when True, SAM otherwise.

    Returns:
        The path of the file written.
    """
    output = output or timestamped_filename(cram_format)
    output = output[len('file://'):] if output.startswith('file://') else output
    assert ':' not in output, f'Unsupported schema for output: "{output}".\n' \
                              f'Only local file outputs are currently supported.'
    with TemporaryDirectory() as staging_dir:
        # cram and crai must share a folder (samtools is picky about this).
        staged_cram = os.path.join(staging_dir, 'tmp.cram')
        stage(uri=cram, output=staged_cram)
        if crai:
            staged_crai = os.path.join(staging_dir, 'tmp.crai')
            stage(uri=crai, output=staged_crai)
        else:
            # No index: samtools will scan the whole file (slower).
            staged_crai = None
        write_final_file_with_samtools(staged_cram, staged_crai, regions, cram_format, output)
    return output
| 44.925339
| 152
| 0.535882
|
e71a2d233ab48764eda6700cc8efad1051d6144d
| 774
|
py
|
Python
|
middletier/dal/filesystem_dal.py
|
amoldms/BloomFilter
|
49acd05751886fb67e454568eb4b58869b9d5959
|
[
"MIT"
] | null | null | null |
middletier/dal/filesystem_dal.py
|
amoldms/BloomFilter
|
49acd05751886fb67e454568eb4b58869b9d5959
|
[
"MIT"
] | null | null | null |
middletier/dal/filesystem_dal.py
|
amoldms/BloomFilter
|
49acd05751886fb67e454568eb4b58869b9d5959
|
[
"MIT"
] | null | null | null |
import middletier.logger as logger
import configparser
import sys
def get_data(filepath):
    """Read a text file and return its contents with newlines replaced by spaces.

    Logs and exits the process (status 1) if the file cannot be read.
    """
    try:
        with open(filepath, 'r') as file:
            document = file.read().replace('\n', ' ')
            return document
    except FileNotFoundError as e:
        # Bug fix: open() raises FileNotFoundError for a missing file, never
        # NameError, so the original "File not found" branch was dead code.
        logger.log("File not found:{}".format(str(e)))
        sys.exit(1)
    except OSError as e:
        logger.log("Error in file reading:{}".format(str(e)))
        sys.exit(1)
def read_config(filepath):
    """Parse an INI-style config file and return the ConfigParser instance.

    Note: ConfigParser.read() silently skips files it cannot open, so the
    handlers below only fire for I/O errors raised while reading content.
    Logs and exits the process (status 1) on failure.
    """
    try:
        config = configparser.ConfigParser()
        config.read(filepath)
        return config
    except FileNotFoundError as e:
        # Bug fix: file operations raise FileNotFoundError, never NameError,
        # so the original "File not found" branch was dead code.
        logger.log("File not found:{}".format(str(e)))
        sys.exit(1)
    except OSError as e:
        logger.log("Error in file reading:{}".format(str(e)))
        sys.exit(1)
| 25.8
| 61
| 0.595607
|
c07275ca79cc93152c36a947fe9f686b968cec67
| 2,144
|
py
|
Python
|
deluca/agents/_pid.py
|
vvanirudh/deluca
|
673e66038547db90a7b23335cfe7508728076a4d
|
[
"Apache-2.0"
] | 25
|
2020-10-27T19:10:36.000Z
|
2022-01-04T14:34:29.000Z
|
deluca/agents/_pid.py
|
vvanirudh/deluca
|
673e66038547db90a7b23335cfe7508728076a4d
|
[
"Apache-2.0"
] | 5
|
2020-10-15T00:52:30.000Z
|
2021-01-18T18:42:40.000Z
|
deluca/agents/_pid.py
|
vvanirudh/deluca
|
673e66038547db90a7b23335cfe7508728076a4d
|
[
"Apache-2.0"
] | 5
|
2020-12-04T23:12:13.000Z
|
2021-06-26T12:38:06.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""deluca.agents._pid"""
from numbers import Real
from typing import Sequence
import jax.numpy as jnp
from deluca.agents.core import Agent
# Default (P, I, D) gains used when the caller does not supply K.
DEFAULT_K = [3.0, 4.0, 0.0]


class PID(Agent):
    """
    PID: agent that plays a PID (proportional-integral-derivative) policy

    Todo:
        - Allow for variable time deltas
    """

    def __init__(self, K: Sequence[Real] = None, RC: Real = 0.5, dt: Real = 0.03, **kwargs) -> None:
        """
        Description: initializes the PID agent

        Args:
            K (Sequence[Real]): sequence of 3 PID gains (P, I, D); defaults
                to DEFAULT_K when omitted
            RC (Real): decay parameter (filter time constant)
            dt (Real): time increment

        Raises:
            ValueError: if K is provided but is not a sequence of 3 reals

        Returns:
            None
        """
        # Bug fix: validate K only when it was explicitly provided.  The
        # original check also ran on the default K=None and always raised,
        # which made DEFAULT_K unreachable.
        if K is not None and (not isinstance(K, Sequence) or len(K) != 3
                              or not all(isinstance(x, Real) for x in K)):
            self.throw(ValueError, "K must be a list or tuple of 3 real numbers, P, I, and D")

        # controller coefficients
        self.K_P, self.K_I, self.K_D = K or DEFAULT_K

        # controller states (proportional / integral / derivative terms)
        self.P, self.I, self.D = 0.0, 0.0, 0.0

        self.RC = RC
        self.dt = dt

    def __call__(self, state: jnp.ndarray) -> jnp.ndarray:
        """
        Description: provide an action given a state

        Args:
            state (jnp.ndarray): the error PID must compensate for

        Returns:
            jnp.ndarray: action to take
        """
        # Exponentially-smoothed updates of the I and D terms; decay is the
        # per-step smoothing factor dt / (dt + RC).
        decay = self.dt / (self.dt + self.RC)
        self.I += decay * (state - self.I)
        self.D += decay * (state - self.P - self.D)
        self.P = state
        return self.K_P * self.P + self.K_I * self.I + self.K_D * self.D
| 28.972973
| 99
| 0.61334
|
fa6bfac94b341d253b9735769c90589d0f9ce9a3
| 7,725
|
py
|
Python
|
ginga/rv/plugins/ScreenShot.py
|
SimonKrughoff/ginga
|
abcd284228400092b2d7b73bb51d30632e90ee03
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/rv/plugins/ScreenShot.py
|
SimonKrughoff/ginga
|
abcd284228400092b2d7b73bb51d30632e90ee03
|
[
"BSD-3-Clause"
] | null | null | null |
ginga/rv/plugins/ScreenShot.py
|
SimonKrughoff/ginga
|
abcd284228400092b2d7b73bb51d30632e90ee03
|
[
"BSD-3-Clause"
] | null | null | null |
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
Capture PNG or JPEG images of the channel viewer image.
**Usage**
1. Select the RGB graphics type for the snap from the "Type" combo box.
2. Press "Snap" when you have the channel image the way you want to capture it.
A copy of the RGB image will be loaded into the ``ScreenShot`` viewer.
You can pan and zoom within the ``ScreenShot`` viewer like a normal Ginga
viewer to examine detail (e.g., see the magnified difference between
JPEG and PNG formats).
3. Repeat (1) and (2) until you have the image you want.
4. Enter a valid path for a new file into the "Folder" text box.
5. Enter a valid name for a new file into the "Name" text box.
There is no need to add the file extension; it will be added, if needed.
6. Press the "Save" button. The file will be saved where you specified.
**Notes**
* PNG offers less artifacts for overlaid graphics, but files are larger
than JPEG.
* The "Center" button will center the snap image; "Fit" will set the
zoom to fit it to the window; and "Clear" will clear the image.
* Press "1" in the screenshot viewer to zoom to 100% pixels.
"""
import os.path
import shutil
import tempfile
from ginga import GingaPlugin
from ginga.RGBImage import RGBImage
from ginga.gw import Widgets, Viewers
__all__ = ['ScreenShot']
class ScreenShot(GingaPlugin.LocalPlugin):
    """Capture PNG or JPEG snapshots of the channel viewer image.

    A snap renders the current channel view to a temporary RGB file, loads it
    into a small preview viewer for inspection, and can then be saved to a
    user-chosen folder/name.
    """

    def __init__(self, fv, fitsimage):
        # superclass defines some variables for us, like logger
        super(ScreenShot, self).__init__(fv, fitsimage)

        self.tosave_type = 'png'        # format selected for the next snap
        self.saved_type = None          # format of the last snap taken, if any
        self.savetypes = ('png', 'jpeg')
        # Bug fix: tempfile.tempdir is None unless gettempdir() was already
        # called somewhere in the process, which made the join fail with a
        # TypeError; gettempdir() always returns a usable directory.
        self.tmpname = os.path.join(tempfile.gettempdir(), "__snap")
        self.save_path = ''
        self.save_name = ''

        # initial size of the preview viewer
        self._wd = 200
        self._ht = 200

    def build_gui(self, container):
        """Build the plugin UI: preview viewer, snap controls and save form."""
        vbox = Widgets.VBox()
        vbox.set_border_width(4)
        vbox.set_spacing(2)

        vbox1 = Widgets.VBox()

        # Uncomment to debug; passing parent logger generates too
        # much noise in the main logger
        #zi = Viewers.CanvasView(logger=self.logger)
        zi = Viewers.CanvasView(logger=None)
        zi.set_desired_size(self._wd, self._ht)
        zi.enable_autozoom('once')
        zi.enable_autocuts('once')
        zi.enable_autocenter('once')
        zi.set_zoom_algorithm('step')
        zi.cut_levels(0, 255)
        # flip vertically: RGB image rows are stored top-down
        zi.transform(False, True, False)
        #zi.set_scale_limits(0.001, 1000.0)
        zi.set_bg(0.4, 0.4, 0.4)
        zi.set_color_map('gray')
        zi.set_intensity_map('ramp')
        # for debugging
        zi.set_name('scrnimage')
        self.scrnimage = zi

        bd = zi.get_bindings()
        bd.enable_zoom(True)
        bd.enable_pan(True)
        bd.enable_cmap(False)
        zi.show_mode_indicator(True)

        iw = Viewers.ScrolledView(zi)
        iw.resize(self._wd, self._ht)
        vbox1.add_widget(iw, stretch=1)

        captions = (('Type:', 'label', 'grtype', 'combobox',
                     'Snap', 'button'),
                    ('Clear', 'button', 'Center', 'button', 'Fit', 'button',
                     'Full', 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation='vertical')
        self.w = b

        combobox = b.grtype
        for name in self.savetypes:
            combobox.append_text(name)
        index = self.savetypes.index(self.tosave_type)
        combobox.set_index(index)
        combobox.add_callback('activated', lambda w, idx: self.set_type(idx))
        combobox.set_tooltip("Set the format of the snap image")

        b.snap.set_tooltip(
            "Click to grab a snapshot of this channel viewer image")
        b.snap.add_callback('activated', self.snap_cb)
        b.clear.set_tooltip("Clear the snap image")
        b.clear.add_callback('activated', self.clear_cb)
        b.center.set_tooltip("Center the snap image")
        b.center.add_callback('activated', self.center_cb)
        b.fit.set_tooltip("Fit snap image to window")
        b.fit.add_callback('activated', self.fit_cb)
        b.full.set_tooltip("View at 100% (1:1)")
        b.full.add_callback('activated', self.full_cb)
        vbox1.add_widget(w, stretch=0)

        fr = Widgets.Frame("Screenshot")
        fr.set_widget(vbox1)

        vpaned = Widgets.Splitter(orientation='vertical')
        vpaned.add_widget(fr)
        vpaned.add_widget(Widgets.Label(''))

        vbox2 = Widgets.VBox()

        fr = Widgets.Frame("Save File")

        captions = (('Folder:', 'label', 'folder', 'entry'),
                    ('Name:', 'label', 'name', 'entry'),
                    ('Save', 'button'),
                    )
        w, b = Widgets.build_info(captions, orientation='vertical')
        self.w.update(b)
        b.folder.set_text(self.save_path)
        b.folder.set_tooltip("Set the folder path for the snap image")
        b.name.set_text(self.save_name)
        b.name.set_tooltip("Set the name for the snap image")
        b.save.set_tooltip("Click to save the last snap")
        b.save.add_callback('activated', self.save_cb)
        fr.set_widget(w)
        vbox2.add_widget(fr, stretch=0)

        # stretch
        spacer = Widgets.Label('')
        vbox2.add_widget(spacer, stretch=1)

        btns = Widgets.HBox()
        btns.set_spacing(3)

        btn = Widgets.Button("Close")
        btn.add_callback('activated', lambda w: self.close())
        btns.add_widget(btn, stretch=0)
        btn = Widgets.Button("Help")
        btn.add_callback('activated', lambda w: self.help())
        btns.add_widget(btn, stretch=0)
        btns.add_widget(Widgets.Label(''), stretch=1)

        vbox2.add_widget(btns, stretch=0)

        #vpaned.add_widget(vbox2)

        vbox.add_widget(vpaned, stretch=1)

        container.add_widget(vbox, stretch=1)
        container.add_widget(vbox2, stretch=0)

    def set_type(self, idx):
        """Record the RGB format to use for the next snap."""
        self.tosave_type = self.savetypes[idx]
        return True

    def close(self):
        """Shut this plugin instance down."""
        self.fv.stop_local_plugin(self.chname, str(self))
        return True

    def start(self):
        pass

    def stop(self):
        # forget the last snap so a stale temp file cannot be saved later
        self.saved_type = None

    def snap_cb(self, w):
        """Render the channel image to the temp file and show it in preview."""
        # renamed from `format` to avoid shadowing the builtin
        fmt = self.tosave_type
        # snap image
        self.fv.error_wrap(self.fitsimage.save_rgb_image_as_file,
                           self.tmpname, format=fmt)
        self.saved_type = fmt

        img = RGBImage(logger=self.logger)
        img.load_file(self.tmpname)
        self.scrnimage.set_image(img)

    def save_cb(self, w):
        """Copy the last snapped temp file to the user-specified location."""
        fmt = self.saved_type
        if fmt is None:
            return self.fv.show_error("Please save an image first.")

        # create filename
        filename = self.w.name.get_text().strip()
        if len(filename) == 0:
            return self.fv.show_error("Please set a name for saving the file")
        self.save_name = filename
        if not filename.lower().endswith('.' + fmt):
            filename = filename + '.' + fmt

        # join to path
        path = self.w.folder.get_text().strip()
        if path == '':
            path = filename
        else:
            self.save_path = path
            path = os.path.join(path, filename)

        # copy last saved file
        self.fv.error_wrap(shutil.copyfile, self.tmpname, path)

    def center_cb(self, w):
        self.scrnimage.center_image(no_reset=True)

    def fit_cb(self, w):
        self.scrnimage.zoom_fit(no_reset=True)

    def full_cb(self, w):
        self.scrnimage.scale_to(1.0, 1.0, no_reset=True)

    def clear_cb(self, w):
        self.scrnimage.clear()

    def __str__(self):
        # lower-cased name identifies this plugin to the framework
        return 'screenshot'
# END
| 31.659836
| 79
| 0.616699
|
4687f3f01cdd300c09d8b4c5df3416ed6988a6dc
| 2,271
|
py
|
Python
|
test/test_oneview_sas_logical_jbod_attachment_facts.py
|
nabhajit-ray/oneview-ansible
|
b31af8a696013bac7a1900748a2fa5ba491fe8e2
|
[
"Apache-2.0"
] | 108
|
2016-06-28T18:14:08.000Z
|
2022-02-21T09:16:06.000Z
|
test/test_oneview_sas_logical_jbod_attachment_facts.py
|
HPE-Japan-Presales/oneview-ansible
|
26eb13354333d862d9e80f07e3fe9bbe2eb59af3
|
[
"Apache-2.0"
] | 248
|
2016-07-14T12:50:17.000Z
|
2022-02-06T18:57:16.000Z
|
test/test_oneview_sas_logical_jbod_attachment_facts.py
|
HPE-Japan-Presales/oneview-ansible
|
26eb13354333d862d9e80f07e3fe9bbe2eb59af3
|
[
"Apache-2.0"
] | 88
|
2016-06-29T15:52:44.000Z
|
2022-03-10T12:34:41.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import pytest
from hpe_test_utils import OneViewBaseFactsTest
from oneview_module_loader import SasLogicalJbodAttachmentFactsModule
# Not referenced by the tests below; kept for parity with sibling test modules.
ERROR_MSG = 'Fake message error'
# Module params for a "get all" facts run (no name filter).
PARAMS_GET_ALL = dict(
    config='config.json',
    name=None
)
# Module params for a lookup of one attachment by name.
PARAMS_GET_BY_NAME = dict(
    config='config.json',
    name="SAS Logical JBOD Attachment 2"
)
# Canned resource payloads returned by the mocked OneView client.
SAS_JBOD_LOGICAL_ATTACHMENTS = [{"name": "SAS Logical JBOD Attachment 1"},
                                {"name": "SAS Logical JBOD Attachment 2"}]
@pytest.mark.resource(TestSasLogicalJbodAttachmentFactsModule='sas_logical_jbod_attachments')
class TestSasLogicalJbodAttachmentFactsModule(OneViewBaseFactsTest):
    """Unit tests for SasLogicalJbodAttachmentFactsModule.

    self.resource and self.mock_ansible_module are supplied by the
    OneViewBaseFactsTest base fixture (project test utilities).
    """

    def test_should_get_all_sas_logical_jbod_attachments(self):
        # No name in params -> the module calls get_all and reports every attachment.
        self.resource.get_all.return_value = SAS_JBOD_LOGICAL_ATTACHMENTS
        self.mock_ansible_module.params = PARAMS_GET_ALL
        SasLogicalJbodAttachmentFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(sas_logical_jbod_attachments=(SAS_JBOD_LOGICAL_ATTACHMENTS))
        )

    def test_should_get_sas_logical_jbod_attachment_by_name(self):
        # A name in params -> the module calls get_by and reports only that attachment.
        self.resource.get_by.return_value = [SAS_JBOD_LOGICAL_ATTACHMENTS[1]]
        self.mock_ansible_module.params = PARAMS_GET_BY_NAME
        SasLogicalJbodAttachmentFactsModule().run()
        self.mock_ansible_module.exit_json.assert_called_once_with(
            changed=False,
            ansible_facts=dict(sas_logical_jbod_attachments=([SAS_JBOD_LOGICAL_ATTACHMENTS[1]]))
        )
# Allow running this test module directly, outside a normal pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
| 33.895522
| 96
| 0.747248
|
016e36c2ac31ecc157bb162fed764779ca10b108
| 1,086
|
py
|
Python
|
coding_problems/first_unique_character.py
|
NescobarAlopLop/miscellaneous
|
8e33cb34ddc54dad233d2418d4a90a96ce3c393e
|
[
"MIT"
] | null | null | null |
coding_problems/first_unique_character.py
|
NescobarAlopLop/miscellaneous
|
8e33cb34ddc54dad233d2418d4a90a96ce3c393e
|
[
"MIT"
] | null | null | null |
coding_problems/first_unique_character.py
|
NescobarAlopLop/miscellaneous
|
8e33cb34ddc54dad233d2418d4a90a96ce3c393e
|
[
"MIT"
] | null | null | null |
"""
Given a string, find the first non-repeating character in it and return it's index. If it doesn't exist, return -1.
Examples:
s = "leetcode"
return 0.
s = "loveleetcode",
return 2.
Note: You may assume the string contain only lowercase letters.
"""
import unittest
class Solution:
    """LeetCode 387: first unique character in a string."""

    def firstUniqChar(self, s):
        """Return the index of the first non-repeating character in *s*.

        :type s: str
        :rtype: int -- index of the first character that occurs exactly once,
            or -1 when no such character exists (including the empty string).
        """
        # First pass: frequency of every character.
        count = {}
        for char in s:
            count[char] = count.get(char, 0) + 1
        # Second pass in original order: the first char seen exactly once wins.
        for i, char in enumerate(s):
            if count[char] == 1:
                return i
        return -1
class TestSolution(unittest.TestCase):
    """Exercises Solution.firstUniqChar on the two LeetCode examples."""

    sol = Solution()

    def test_0(self):
        result = self.sol.firstUniqChar("leetcode")
        print("result: {}".format(result))
        self.assertEqual(result, 0)

    def test_1(self):
        result = self.sol.firstUniqChar("loveleetcode")
        print("result: {}".format(result))
        self.assertEqual(result, 2)
# Run the unittest suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 20.111111
| 116
| 0.540516
|
14b9029ab9cf14ee3a6dc37287c68d3f0c4670df
| 555
|
py
|
Python
|
examples/make_stake_pool_keys.py
|
Canuckz-NFT/Cardano-Tools
|
1cbe9041a9ab7fd0fc9018975acff3490aa1d8ca
|
[
"Apache-2.0"
] | null | null | null |
examples/make_stake_pool_keys.py
|
Canuckz-NFT/Cardano-Tools
|
1cbe9041a9ab7fd0fc9018975acff3490aa1d8ca
|
[
"Apache-2.0"
] | null | null | null |
examples/make_stake_pool_keys.py
|
Canuckz-NFT/Cardano-Tools
|
1cbe9041a9ab7fd0fc9018975acff3490aa1d8ca
|
[
"Apache-2.0"
] | 1
|
2021-09-11T12:26:27.000Z
|
2021-09-11T12:26:27.000Z
|
# Example script: generate block-producing (stake pool) keys with cardano_tools.
import sys
sys.path.append('../')  # make the sibling cardano_tools package importable; must precede the import below
from cardano_tools import ShelleyTools
# Inputs
# NOTE(review): paths assume the conventional cardano-node home layout — adjust per host.
genesis_file = "/home/lovelace/cardano-node/shelley_testnet-genesis.json"
working_dir = "/home/lovelace/cardano-node/"
# Create a ShelleyTools object
shelley = ShelleyTools(
    "/usr/local/bin/cardano-cli",
    "/home/lovelace/cardano-node/node.socket",
    working_dir,
    network="--testnet-magic 42" # <-- for testnet only; omit for mainnet
)
# Generate the pool keys (writes key files under working_dir) and print the id.
pool_id = shelley.create_block_producing_keys(genesis_file, pool_name="POOL")
print(f"Stake Pool ID: {pool_id}")
| 27.75
| 77
| 0.740541
|
78f96486a38050ccefe6663da34082a661ef8076
| 925
|
py
|
Python
|
app.py
|
itcosplay/bot_test
|
64d2e1e0333d44df69732447af3160c3cf23c11d
|
[
"MIT"
] | null | null | null |
app.py
|
itcosplay/bot_test
|
64d2e1e0333d44df69732447af3160c3cf23c11d
|
[
"MIT"
] | null | null | null |
app.py
|
itcosplay/bot_test
|
64d2e1e0333d44df69732447af3160c3cf23c11d
|
[
"MIT"
] | null | null | null |
# Minimal aiogram webhook bootstrap. Work in progress: the Bot/Dispatcher
# setup here is commented out and the live dispatcher comes from handlers.py.
import logging
from aiogram import Bot, types
from aiogram.contrib.middlewares.logging import LoggingMiddleware
from aiogram.dispatcher import Dispatcher
from aiogram.dispatcher.webhook import SendMessage
from aiogram.utils.executor import start_webhook
# NOTE(review): placeholder token — replace (ideally read from the environment)
# before deploying; currently unused because Bot creation is commented out.
API_TOKEN = 'BOT_TOKEN_HERE'
# webhook settings (WEBHOOK_URL is assembled but not yet passed anywhere)
WEBHOOK_HOST = 'localhost'
WEBHOOK_PATH = ''
WEBHOOK_URL = f"{WEBHOOK_HOST}{WEBHOOK_PATH}"
# webserver settings
WEBAPP_HOST = 'localhost' # or ip
WEBAPP_PORT = 5000
# bot = Bot(token=API_TOKEN)
# dp = Dispatcher(bot)
# dp.middleware.setup(LoggingMiddleware())
if __name__ == '__main__':
    from aiogram import executor
    from handlers import dp  # dispatcher with all handlers registered
    # executor.start_polling(dp)
    # Serve updates over a webhook instead of long polling.
    start_webhook (
        dispatcher=dp,
        webhook_path=WEBHOOK_PATH,
        # on_startup=on_startup,
        # on_shutdown=on_shutdown,
        skip_updates=True,
        host=WEBAPP_HOST,
        port=WEBAPP_PORT,
    )
| 22.560976
| 65
| 0.731892
|
365d182fa48cb1b4e6d9e126b236e5f52763ab56
| 751
|
py
|
Python
|
aliexpresscoupons/settings.py
|
ils78/aliexpresscoupons
|
19d922515412206784ab299c2b853b5c67d89679
|
[
"MIT"
] | 6
|
2015-04-10T12:27:24.000Z
|
2020-12-19T20:19:16.000Z
|
aliexpresscoupons/settings.py
|
ils78/aliexpresscoupons
|
19d922515412206784ab299c2b853b5c67d89679
|
[
"MIT"
] | null | null | null |
aliexpresscoupons/settings.py
|
ils78/aliexpresscoupons
|
19d922515412206784ab299c2b853b5c67d89679
|
[
"MIT"
] | 2
|
2018-07-30T10:14:41.000Z
|
2021-02-21T00:55:53.000Z
|
# -*- coding: utf-8 -*-
# Scrapy settings for the aliexpresscoupons project.
#
# Only the most commonly tuned settings appear here; the full reference is:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'aliexpresscoupons'
SPIDER_MODULES = ['aliexpresscoupons.spiders']
NEWSPIDER_MODULE = 'aliexpresscoupons.spiders'
# The target site needs cookies for session handling.
COOKIES_ENABLED = True
# Politeness: ~1 s between requests, randomized by Scrapy (0.5x-1.5x of
# DOWNLOAD_DELAY) so the crawl has no fixed, detectable cadence.
RANDOMIZE_DOWNLOAD_DELAY = True
DOWNLOAD_DELAY = 1
# Impersonate a desktop Chromium browser instead of the default Scrapy UA.
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/37.0.2062.120 Chrome/37.0.2062.120 Safari/537.36'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'aliexpress (+http://www.yourdomain.com)'
| 31.291667
| 150
| 0.761651
|
092704e16f596997d2f0ea741eddd67a1475c22b
| 4,166
|
py
|
Python
|
profit/types/spend_bundle.py
|
zcomputerwiz/profit-blockchain
|
d6d4337ea7c418c66f05f22a263e94190452aed6
|
[
"Apache-2.0"
] | 7
|
2022-03-15T01:33:35.000Z
|
2022-03-26T21:29:45.000Z
|
profit/types/spend_bundle.py
|
zcomputerwiz/profit-blockchain
|
d6d4337ea7c418c66f05f22a263e94190452aed6
|
[
"Apache-2.0"
] | 3
|
2022-03-19T23:02:18.000Z
|
2022-03-19T23:02:19.000Z
|
profit/types/spend_bundle.py
|
zcomputerwiz/profit-blockchain
|
d6d4337ea7c418c66f05f22a263e94190452aed6
|
[
"Apache-2.0"
] | null | null | null |
import dataclasses
import warnings
from dataclasses import dataclass
from typing import List
from blspy import AugSchemeMPL, G2Element
from profit.consensus.default_constants import DEFAULT_CONSTANTS
from profit.types.blockchain_format.coin import Coin
from profit.types.blockchain_format.sized_bytes import bytes32
from profit.util.streamable import Streamable, dataclass_from_dict, recurse_jsonify, streamable
from profit.wallet.util.debug_spend_bundle import debug_spend_bundle
from .coin_spend import CoinSpend
@dataclass(frozen=True)
@streamable
class SpendBundle(Streamable):
    """
    This is a list of coins being spent along with their solution programs, and a single
    aggregated signature. This is the object that most closely corresponds to a bitcoin
    transaction (although because of non-interactive signature aggregation, the boundaries
    between transactions are more flexible than in bitcoin).
    """

    # The spends (coin plus puzzle/solution) this bundle commits to.
    coin_spends: List[CoinSpend]
    # BLS aggregate of the signatures covering the spends above.
    aggregated_signature: G2Element

    @property
    def coin_solutions(self):
        # Deprecated alias kept for callers that predate the rename to
        # `coin_spends` (see the legacy-name note further down).
        return self.coin_spends

    @classmethod
    def aggregate(cls, spend_bundles) -> "SpendBundle":
        # Combine several bundles into one: concatenate spends and
        # BLS-aggregate their signatures into a single G2Element.
        coin_spends: List[CoinSpend] = []
        sigs: List[G2Element] = []
        for bundle in spend_bundles:
            coin_spends += bundle.coin_spends
            sigs.append(bundle.aggregated_signature)
        aggregated_signature = AugSchemeMPL.aggregate(sigs)
        return cls(coin_spends, aggregated_signature)

    def additions(self) -> List[Coin]:
        # Every coin created by the spends in this bundle.
        items: List[Coin] = []
        for coin_spend in self.coin_spends:
            items.extend(coin_spend.additions())
        return items

    def removals(self) -> List[Coin]:
        """This should be used only by wallet"""
        # Coins consumed: simply the coin of each spend.
        return [_.coin for _ in self.coin_spends]

    def fees(self) -> int:
        """Unsafe to use for fees validation!!!"""
        # Fee = value consumed minus value created; trusts the bundle's own
        # (unvalidated) data, hence the warning above.
        amount_in = sum(_.amount for _ in self.removals())
        amount_out = sum(_.amount for _ in self.additions())
        return amount_in - amount_out

    def name(self) -> bytes32:
        # The bundle's streamable hash serves as its identifier.
        return self.get_hash()

    def debug(self, agg_sig_additional_data=DEFAULT_CONSTANTS.AGG_SIG_ME_ADDITIONAL_DATA):
        # Pretty-print/inspect the bundle via the wallet debug helper.
        debug_spend_bundle(self, agg_sig_additional_data)

    def not_ephemeral_additions(self):
        # Additions that are not also removed within this same bundle
        # (i.e. coins that survive rather than being spent ephemerally).
        all_removals = self.removals()
        all_additions = self.additions()
        result: List[Coin] = []
        for add in all_additions:
            if add in all_removals:
                continue
            result.append(add)
        return result

    # Note that `coin_spends` used to have the bad name `coin_solutions`.
    # Some API still expects this name. For now, we accept both names.
    #
    # TODO: continue this deprecation. Eventually, all code below here should be removed.
    # 1. set `exclude_modern_keys` to `False` (and manually set to `True` where necessary)
    # 2. set `include_legacy_keys` to `False` (and manually set to `False` where necessary)
    # 3. remove all references to `include_legacy_keys=True`
    # 4. remove all code below this point

    @classmethod
    def from_json_dict(cls, json_dict):
        # Accept the legacy `coin_solutions` key, but only when the modern
        # key is absent; both at once is ambiguous and rejected.
        if "coin_solutions" in json_dict:
            if "coin_spends" not in json_dict:
                json_dict = dict(
                    aggregated_signature=json_dict["aggregated_signature"], coin_spends=json_dict["coin_solutions"]
                )
                warnings.warn("`coin_solutions` is now `coin_spends` in `SpendBundle.from_json_dict`")
            else:
                raise ValueError("JSON contains both `coin_solutions` and `coin_spends`, just use `coin_spends`")
        return dataclass_from_dict(cls, json_dict)

    def to_json_dict(self, include_legacy_keys: bool = True, exclude_modern_keys: bool = True):
        # Emit the legacy key, the modern key, or both; emitting neither
        # would lose the spends entirely and is therefore an error.
        if include_legacy_keys is False and exclude_modern_keys is True:
            raise ValueError("`coin_spends` not included in legacy or modern outputs")
        d = dataclasses.asdict(self)
        if include_legacy_keys:
            d["coin_solutions"] = d["coin_spends"]
        if exclude_modern_keys:
            del d["coin_spends"]
        return recurse_jsonify(d)
| 37.872727
| 115
| 0.68747
|
8fec80e666953ced60ce42f1388a98742fbf1f4d
| 117
|
py
|
Python
|
restaurants/admin.py
|
sgr-smile2015/website
|
a6a90e8fff52a3d8adb797cfe2e6f74c4ad0e3c0
|
[
"Apache-2.0"
] | 1
|
2018-02-27T08:24:12.000Z
|
2018-02-27T08:24:12.000Z
|
restaurants/admin.py
|
sgr-smile2015/website
|
a6a90e8fff52a3d8adb797cfe2e6f74c4ad0e3c0
|
[
"Apache-2.0"
] | null | null | null |
restaurants/admin.py
|
sgr-smile2015/website
|
a6a90e8fff52a3d8adb797cfe2e6f74c4ad0e3c0
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import RestaurantsLocation
# Expose RestaurantsLocation in the Django admin with the default ModelAdmin.
admin.site.register(RestaurantsLocation)
| 16.714286
| 40
| 0.846154
|
2dbce44693509a02dbe7914586afcb4c0959cb6b
| 3,761
|
py
|
Python
|
tests/test_bmp2sysex.py
|
Skoddiethecat/bmp2sysex
|
33f1fccc5dfd25491df2477e06b98b236ea0f3f4
|
[
"MIT"
] | null | null | null |
tests/test_bmp2sysex.py
|
Skoddiethecat/bmp2sysex
|
33f1fccc5dfd25491df2477e06b98b236ea0f3f4
|
[
"MIT"
] | null | null | null |
tests/test_bmp2sysex.py
|
Skoddiethecat/bmp2sysex
|
33f1fccc5dfd25491df2477e06b98b236ea0f3f4
|
[
"MIT"
] | null | null | null |
import tempfile
import warnings
from unittest import TestCase
import numpy as np
from PIL import Image
from bmp2sysex.__main__ import main
class TestMain(TestCase):
    """End-to-end tests for ``bmp2sysex.__main__.main``.

    Each test renders a bit pattern into a temporary image file and checks
    the bit string (or the diagnostic) that ``main`` produces for it.
    A '1' in the expected sysex string corresponds to a dark pixel, hence
    the ``1 - int(i)`` inversion when building the pixel arrays.

    Fix: every ``.astype(np.bool)`` became ``.astype(bool)`` — ``np.bool``
    was a deprecated alias for the builtin ``bool`` and was removed in
    NumPy 1.24, so the old spelling raises AttributeError on modern NumPy.
    """

    def test_16x16_1bit_png(self):
        """A 16x16 1-bit PNG decodes to the expected bit string."""
        expected = "1010100000000001101010000000000011101000000000011010100000000000101010000000000100000000000000000000000000000001110101010111000010010101010100011001110101110000100101010101000111010101010100000000000000000001000000000000000000000000000000011010101010101010"  # noqa: E501
        arr = (
            np.array([1 - int(i) for i in expected])
            .reshape((16, 16))
            .astype(bool)
        )
        with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
            im = Image.fromarray(arr)
            im.mode = "1"  # force Pillow's 1-bit mode before saving
            im.save(tmp.name)
            self.assertEqual(main(tmp.name), expected)

    def test_16x16_1bit_bmp(self):
        """Same pattern as the PNG test, saved as BMP instead."""
        expected = "1010100000000001101010000000000011101000000000011010100000000000101010000000000100000000000000000000000000000001110101010111000010010101010100011001110101110000100101010101000111010101010100000000000000000001000000000000000000000000000000011010101010101010"  # noqa: E501
        arr = (
            np.array([1 - int(i) for i in expected])
            .reshape((16, 16))
            .astype(bool)
        )
        with tempfile.NamedTemporaryFile(suffix=".bmp") as tmp:
            im = Image.fromarray(arr)
            im.mode = "1"  # force Pillow's 1-bit mode before saving
            im.save(tmp.name)
            self.assertEqual(main(tmp.name), expected)

    def test_16x16_8bit(self):
        """An 8-bit ("L" mode) image triggers the 1-bit conversion warning."""
        expected = "1010100000000001101010000000000011101000000000011010100000000000101010000000000100000000000000000000000000000001110101010111000010010101010100011001110101110000100101010101000111010101010100000000000000000001000000000000000000000000000000011010101010101010"  # noqa: E501
        arr = (
            np.array([255 * (1 - int(i)) for i in expected])
            .reshape((16, 16))
            .astype(bool)
        )
        with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
            im = Image.fromarray(arr)
            im.mode = "L"  # deliberately save as 8-bit grayscale
            im.save(tmp.name)
            with warnings.catch_warnings(record=True) as warning_list:
                main(tmp.name)
            self.assertIn(
                "Only 1-bit images are supported. Will convert to 1-bit",
                [str(warning.message) for warning in warning_list],
            )

    def test_8x8_1bit_png(self):
        """Anything other than 16x16 is rejected with a RuntimeError."""
        expected = (
            "1010100000000001101010000000000011101000000000011010100000000000"
        )
        arr = (
            np.array([1 - int(i) for i in expected])
            .reshape((8, 8))
            .astype(bool)
        )
        with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
            im = Image.fromarray(arr)
            im.mode = "1"  # force Pillow's 1-bit mode before saving
            im.save(tmp.name)
            with self.assertRaises(RuntimeError) as err:
                main(tmp.name)
            self.assertIn(
                "Only 16x16 images are supported", str(err.exception)
            )

    def test_white1(self):
        """white1=True inverts the polarity, so no 1 - bit flip is needed."""
        expected = "1010100000000001101010000000000011101000000000011010100000000000101010000000000100000000000000000000000000000001110101010111000010010101010100011001110101110000100101010101000111010101010100000000000000000001000000000000000000000000000000011010101010101010"  # noqa: E501
        arr = (
            np.array([int(i) for i in expected])
            .reshape((16, 16))
            .astype(bool)
        )
        with tempfile.NamedTemporaryFile(suffix=".png") as tmp:
            im = Image.fromarray(arr)
            im.mode = "1"  # force Pillow's 1-bit mode before saving
            im.save(tmp.name)
            self.assertEqual(main(tmp.name, white1=True), expected)
| 43.229885
| 291
| 0.654347
|
13fba01734591fdaf32314af34a71b6f50bec222
| 137,853
|
py
|
Python
|
whoville/cloudbreak/apis/v2stacks_api.py
|
mikchaos/whoville
|
6eabaea4b74ac0b632c03db8252590131c6ce63b
|
[
"Apache-2.0"
] | null | null | null |
whoville/cloudbreak/apis/v2stacks_api.py
|
mikchaos/whoville
|
6eabaea4b74ac0b632c03db8252590131c6ce63b
|
[
"Apache-2.0"
] | null | null | null |
whoville/cloudbreak/apis/v2stacks_api.py
|
mikchaos/whoville
|
6eabaea4b74ac0b632c03db8252590131c6ce63b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class V2stacksApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def delete_instance_stack_v2(self, stack_id, instance_id, **kwargs):
"""
delete instance resource from stack
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_instance_stack_v2(stack_id, instance_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int stack_id: (required)
:param str instance_id: (required)
:param bool forced:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_instance_stack_v2_with_http_info(stack_id, instance_id, **kwargs)
else:
(data) = self.delete_instance_stack_v2_with_http_info(stack_id, instance_id, **kwargs)
return data
    def delete_instance_stack_v2_with_http_info(self, stack_id, instance_id, **kwargs):
        """
        delete instance resource from stack

        Issues ``DELETE /v2/stacks/{stackId}/{instanceId}``. Synchronous by
        default; define a `callback` keyword to make the request
        asynchronously, in which case the request thread is returned and the
        callback receives the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int stack_id: (required)
        :param str instance_id: (required)
        :param bool forced:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of endpoint keywords plus the transport-level options
        # common to every generated method.
        all_params = ['stack_id', 'instance_id', 'forced']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names as dict keys;
        # the parameter/variable names above must not be renamed, or the
        # lookups below silently break.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_instance_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'stack_id' is set
        if ('stack_id' not in params) or (params['stack_id'] is None):
            raise ValueError("Missing the required parameter `stack_id` when calling `delete_instance_stack_v2`")
        # verify the required parameter 'instance_id' is set
        if ('instance_id' not in params) or (params['instance_id'] is None):
            raise ValueError("Missing the required parameter `instance_id` when calling `delete_instance_stack_v2`")
        collection_formats = {}
        # URL template substitutions.
        path_params = {}
        if 'stack_id' in params:
            path_params['stackId'] = params['stack_id']
        if 'instance_id' in params:
            path_params['instanceId'] = params['instance_id']
        query_params = []
        if 'forced' in params:
            query_params.append(('forced', params['forced']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP exchange (and optional async dispatch)
        # to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/{stackId}/{instanceId}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_instances_stack_v2(self, stack_id, **kwargs):
"""
delete instance resource from stack
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_instances_stack_v2(stack_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int stack_id: (required)
:param list[str] instance_ids:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_instances_stack_v2_with_http_info(stack_id, **kwargs)
else:
(data) = self.delete_instances_stack_v2_with_http_info(stack_id, **kwargs)
return data
    def delete_instances_stack_v2_with_http_info(self, stack_id, **kwargs):
        """
        delete instance resource from stack

        Issues ``DELETE /v2/stacks/{stackId}/deleteInstances``. Synchronous
        by default; define a `callback` keyword to make the request
        asynchronously, in which case the request thread is returned and the
        callback receives the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int stack_id: (required)
        :param list[str] instance_ids:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of endpoint keywords plus the transport-level options
        # common to every generated method.
        all_params = ['stack_id', 'instance_ids']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names as dict keys;
        # the parameter/variable names above must not be renamed.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_instances_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'stack_id' is set
        if ('stack_id' not in params) or (params['stack_id'] is None):
            raise ValueError("Missing the required parameter `stack_id` when calling `delete_instances_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'stack_id' in params:
            path_params['stackId'] = params['stack_id']
        query_params = []
        if 'instance_ids' in params:
            query_params.append(('instanceIds', params['instance_ids']))
            # 'multi': repeat the query key once per list element.
            collection_formats['instanceIds'] = 'multi'
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/v2/stacks/{stackId}/deleteInstances', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_private_stack_v2(self, name, **kwargs):
"""
delete private stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_private_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param bool forced:
:param bool delete_dependencies:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_private_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.delete_private_stack_v2_with_http_info(name, **kwargs)
return data
    def delete_private_stack_v2_with_http_info(self, name, **kwargs):
        """
        delete private stack by name

        Issues ``DELETE /v2/stacks/user/{name}``. Synchronous by default;
        define a `callback` keyword to make the request asynchronously, in
        which case the request thread is returned and the callback receives
        the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :param bool forced:
        :param bool delete_dependencies:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of endpoint keywords plus the transport-level options
        # common to every generated method.
        all_params = ['name', 'forced', 'delete_dependencies']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names as dict keys;
        # the parameter/variable names above must not be renamed.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_private_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `delete_private_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        if 'forced' in params:
            query_params.append(('forced', params['forced']))
        if 'delete_dependencies' in params:
            query_params.append(('deleteDependencies', params['delete_dependencies']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/v2/stacks/user/{name}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_public_stack_v2(self, name, **kwargs):
"""
delete public (owned) or private stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_public_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param bool forced:
:param bool delete_dependencies:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_public_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.delete_public_stack_v2_with_http_info(name, **kwargs)
return data
    def delete_public_stack_v2_with_http_info(self, name, **kwargs):
        """
        delete public (owned) or private stack by name

        Issues ``DELETE /v2/stacks/account/{name}``. Synchronous by default;
        define a `callback` keyword to make the request asynchronously, in
        which case the request thread is returned and the callback receives
        the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :param bool forced:
        :param bool delete_dependencies:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of endpoint keywords plus the transport-level options
        # common to every generated method.
        all_params = ['name', 'forced', 'delete_dependencies']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names as dict keys;
        # the parameter/variable names above must not be renamed.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_public_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `delete_public_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        if 'forced' in params:
            query_params.append(('forced', params['forced']))
        if 'delete_dependencies' in params:
            query_params.append(('deleteDependencies', params['delete_dependencies']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the HTTP exchange (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/v2/stacks/account/{name}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_stack_v2(self, id, **kwargs):
"""
delete stack by id
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_stack_v2(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:param bool forced:
:param bool delete_dependencies:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_stack_v2_with_http_info(id, **kwargs)
else:
(data) = self.delete_stack_v2_with_http_info(id, **kwargs)
return data
    def delete_stack_v2_with_http_info(self, id, **kwargs):
        """
        delete stack by id
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_stack_v2_with_http_info(id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int id: (required)
        :param bool forced:
        :param bool delete_dependencies:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        # 'all_params' holds the endpoint-specific arguments plus the
        # framework-level options accepted by every generated method.
        all_params = ['id', 'forced', 'delete_dependencies']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot positional args; explicit kwargs are merged in below after
        # being validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_stack_v2`")
        collection_formats = {}
        # 'id' is interpolated into the URL path template below.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        # Optional flags are sent as query-string parameters.
        query_params = []
        if 'forced' in params:
            query_params.append(('forced', params['forced']))
        if 'delete_dependencies' in params:
            query_params.append(('deleteDependencies', params['delete_dependencies']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/{id}', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_all_stack_v2(self, **kwargs):
"""
retrieve all stacks
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_stack_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[AutoscaleStackResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_stack_v2_with_http_info(**kwargs)
else:
(data) = self.get_all_stack_v2_with_http_info(**kwargs)
return data
    def get_all_stack_v2_with_http_info(self, **kwargs):
        """
        retrieve all stacks
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_all_stack_v2_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: list[AutoscaleStackResponse]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        # This endpoint takes no endpoint-specific arguments, only the
        # framework-level options accepted by every generated method.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot locals; explicit kwargs are merged in below after being
        # validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_all_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/all', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[AutoscaleStackResponse]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_certificate_stack_v2(self, id, **kwargs):
"""
retrieves the TLS certificate used by the gateway
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_certificate_stack_v2(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: CertificateResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_certificate_stack_v2_with_http_info(id, **kwargs)
else:
(data) = self.get_certificate_stack_v2_with_http_info(id, **kwargs)
return data
    def get_certificate_stack_v2_with_http_info(self, id, **kwargs):
        """
        retrieves the TLS certificate used by the gateway
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_certificate_stack_v2_with_http_info(id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int id: (required)
        :return: CertificateResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        all_params = ['id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot positional args; explicit kwargs are merged in below after
        # being validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_certificate_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_certificate_stack_v2`")
        collection_formats = {}
        # 'id' is interpolated into the URL path template below.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/{id}/certificate', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='CertificateResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_cluster_request_from_name(self, name, **kwargs):
"""
retrieve stack request by stack name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_cluster_request_from_name(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: StackV2Request
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_cluster_request_from_name_with_http_info(name, **kwargs)
else:
(data) = self.get_cluster_request_from_name_with_http_info(name, **kwargs)
return data
    def get_cluster_request_from_name_with_http_info(self, name, **kwargs):
        """
        retrieve stack request by stack name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_cluster_request_from_name_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :return: StackV2Request
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        all_params = ['name']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot positional args; explicit kwargs are merged in below after
        # being validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_cluster_request_from_name" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `get_cluster_request_from_name`")
        collection_formats = {}
        # 'name' is interpolated into the URL path template below.
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/{name}/request', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StackV2Request',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_private_stack_v2(self, name, **kwargs):
"""
retrieve a private stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_private_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param list[str] entry:
:return: StackResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_private_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.get_private_stack_v2_with_http_info(name, **kwargs)
return data
    def get_private_stack_v2_with_http_info(self, name, **kwargs):
        """
        retrieve a private stack by name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_private_stack_v2_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :param list[str] entry:
        :return: StackResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        all_params = ['name', 'entry']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot positional args; explicit kwargs are merged in below after
        # being validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_private_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `get_private_stack_v2`")
        collection_formats = {}
        # 'name' is interpolated into the URL path template below.
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        if 'entry' in params:
            query_params.append(('entry', params['entry']))
            # 'multi' serializes the list as a repeated ?entry=... parameter.
            collection_formats['entry'] = 'multi'
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/user/{name}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StackResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_privates_stack_v2(self, **kwargs):
"""
retrieve private stack
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_privates_stack_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[StackResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_privates_stack_v2_with_http_info(**kwargs)
else:
(data) = self.get_privates_stack_v2_with_http_info(**kwargs)
return data
    def get_privates_stack_v2_with_http_info(self, **kwargs):
        """
        retrieve private stack
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_privates_stack_v2_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: list[StackResponse]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        # No endpoint-specific arguments, only framework-level options.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot locals; explicit kwargs are merged in below after being
        # validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_privates_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/user', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[StackResponse]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_public_stack_v2(self, name, **kwargs):
"""
retrieve a public or private (owned) stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_public_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param list[str] entry:
:return: StackResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_public_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.get_public_stack_v2_with_http_info(name, **kwargs)
return data
    def get_public_stack_v2_with_http_info(self, name, **kwargs):
        """
        retrieve a public or private (owned) stack by name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_public_stack_v2_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :param list[str] entry:
        :return: StackResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        all_params = ['name', 'entry']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot positional args; explicit kwargs are merged in below after
        # being validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_public_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `get_public_stack_v2`")
        collection_formats = {}
        # 'name' is interpolated into the URL path template below.
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        if 'entry' in params:
            query_params.append(('entry', params['entry']))
            # 'multi' serializes the list as a repeated ?entry=... parameter.
            collection_formats['entry'] = 'multi'
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/account/{name}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StackResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_publics_stack_v2(self, **kwargs):
"""
retrieve public and private (owned) stacks
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_publics_stack_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[StackResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_publics_stack_v2_with_http_info(**kwargs)
else:
(data) = self.get_publics_stack_v2_with_http_info(**kwargs)
return data
    def get_publics_stack_v2_with_http_info(self, **kwargs):
        """
        retrieve public and private (owned) stacks
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_publics_stack_v2_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: list[StackResponse]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        # No endpoint-specific arguments, only framework-level options.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot locals; explicit kwargs are merged in below after being
        # validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_publics_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/account', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='list[StackResponse]',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_stack_for_ambari_v2(self, **kwargs):
"""
retrieve stack by ambari address
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_stack_for_ambari_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param AmbariAddress body:
:return: StackResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_stack_for_ambari_v2_with_http_info(**kwargs)
else:
(data) = self.get_stack_for_ambari_v2_with_http_info(**kwargs)
return data
    def get_stack_for_ambari_v2_with_http_info(self, **kwargs):
        """
        retrieve stack by ambari address
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_stack_for_ambari_v2_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param AmbariAddress body:
        :return: StackResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Generated swagger-codegen boilerplate: argument collection relies on
        # locals() below, so the assignments preceding it must keep their order.
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot locals; explicit kwargs are merged in below after being
        # validated against 'all_params'.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_stack_for_ambari_v2" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # The optional AmbariAddress model is sent as the JSON request body.
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, serialization, auth and the sync/async dispatch are all
        # delegated to the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/ambari', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StackResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_stack_v2(self, id, **kwargs):
"""
retrieve stack by id
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_stack_v2(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:param list[str] entry:
:return: StackResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_stack_v2_with_http_info(id, **kwargs)
else:
(data) = self.get_stack_v2_with_http_info(id, **kwargs)
return data
    def get_stack_v2_with_http_info(self, id, **kwargs):
        """
        retrieve stack by id
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_stack_v2_with_http_info(id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int id: (required)
        :param list[str] entry:
        :return: StackResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['id', 'entry']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, id, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']
        query_params = []
        if 'entry' in params:
            query_params.append(('entry', params['entry']))
            # 'multi': each list element becomes its own `entry=` query pair.
            collection_formats['entry'] = 'multi'
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/{id}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StackResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def post_private_stack_v2(self, **kwargs):
"""
create stack as private resource
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_private_stack_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StackV2Request body:
:return: StackResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_private_stack_v2_with_http_info(**kwargs)
else:
(data) = self.post_private_stack_v2_with_http_info(**kwargs)
return data
    def post_private_stack_v2_with_http_info(self, **kwargs):
        """
        create stack as private resource
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.post_private_stack_v2_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param StackV2Request body:
        :return: StackResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_private_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/user', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StackResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def post_public_stack_v2(self, **kwargs):
"""
create stack as public resource
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_stack_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StackV2Request body:
:return: StackResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_public_stack_v2_with_http_info(**kwargs)
else:
(data) = self.post_public_stack_v2_with_http_info(**kwargs)
return data
    def post_public_stack_v2_with_http_info(self, **kwargs):
        """
        create stack as public resource
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.post_public_stack_v2_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param StackV2Request body:
        :return: StackResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_public_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/account', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='StackResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def post_public_stack_v2_for_blueprint(self, **kwargs):
"""
create stack as public resource for blueprint
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.post_public_stack_v2_for_blueprint(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StackV2Request body:
:return: GeneratedBlueprintResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.post_public_stack_v2_for_blueprint_with_http_info(**kwargs)
else:
(data) = self.post_public_stack_v2_for_blueprint_with_http_info(**kwargs)
return data
    def post_public_stack_v2_for_blueprint_with_http_info(self, **kwargs):
        """
        create stack as public resource for blueprint
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.post_public_stack_v2_for_blueprint_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param StackV2Request body:
        :return: GeneratedBlueprintResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_public_stack_v2_for_blueprint" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/blueprint', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='GeneratedBlueprintResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def putpassword_stack_v2(self, name, **kwargs):
"""
update stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.putpassword_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param UserNamePassword body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.putpassword_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.putpassword_stack_v2_with_http_info(name, **kwargs)
return data
    def putpassword_stack_v2_with_http_info(self, name, **kwargs):
        """
        update stack by name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.putpassword_stack_v2_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :param UserNamePassword body:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['name', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, name, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method putpassword_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `putpassword_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/ambari_password/{name}', 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def putreinstall_stack_v2(self, name, **kwargs):
"""
update stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.putreinstall_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param ReinstallRequestV2 body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.putreinstall_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.putreinstall_stack_v2_with_http_info(name, **kwargs)
return data
    def putreinstall_stack_v2_with_http_info(self, name, **kwargs):
        """
        update stack by name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.putreinstall_stack_v2_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :param ReinstallRequestV2 body:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['name', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, name, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method putreinstall_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `putreinstall_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/reinstall/{name}', 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def putscaling_stack_v2(self, name, **kwargs):
"""
update stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.putscaling_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param StackScaleRequestV2 body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.putscaling_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.putscaling_stack_v2_with_http_info(name, **kwargs)
return data
    def putscaling_stack_v2_with_http_info(self, name, **kwargs):
        """
        update stack by name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.putscaling_stack_v2_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :param StackScaleRequestV2 body:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['name', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, name, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method putscaling_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `putscaling_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/scaling/{name}', 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def putstart_stack_v2(self, name, **kwargs):
"""
update stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.putstart_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.putstart_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.putstart_stack_v2_with_http_info(name, **kwargs)
return data
    def putstart_stack_v2_with_http_info(self, name, **kwargs):
        """
        update stack by name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.putstart_stack_v2_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['name']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, name, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method putstart_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `putstart_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/start/{name}', 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def putstop_stack_v2(self, name, **kwargs):
"""
update stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.putstop_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.putstop_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.putstop_stack_v2_with_http_info(name, **kwargs)
return data
    def putstop_stack_v2_with_http_info(self, name, **kwargs):
        """
        update stack by name
        Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.putstop_stack_v2_with_http_info(name, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Complete set of keyword arguments accepted by this endpoint.
        all_params = ['name']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # Snapshot the local namespace (self, name, kwargs, all_params); the
        # validated kwargs are merged into this dict below.  NOTE(review):
        # adding locals above this line would leak them into `params`.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method putstop_stack_v2" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `putstop_stack_v2`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Transport, (de)serialization and optional async dispatch are
        # handled by the shared ApiClient.
        return self.api_client.call_api('/v2/stacks/stop/{name}', 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type=None,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def putsync_stack_v2(self, name, **kwargs):
"""
update stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.putsync_stack_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.putsync_stack_v2_with_http_info(name, **kwargs)
else:
(data) = self.putsync_stack_v2_with_http_info(name, **kwargs)
return data
def putsync_stack_v2_with_http_info(self, name, **kwargs):
"""
update stack by name
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.putsync_stack_v2_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method putsync_stack_v2" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `putsync_stack_v2`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/stacks/sync/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repair_cluster_v2(self, name, **kwargs):
"""
repair the cluster
Removing the failed nodes and starting new nodes to substitute them.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.repair_cluster_v2(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param ClusterRepairRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.repair_cluster_v2_with_http_info(name, **kwargs)
else:
(data) = self.repair_cluster_v2_with_http_info(name, **kwargs)
return data
def repair_cluster_v2_with_http_info(self, name, **kwargs):
"""
repair the cluster
Removing the failed nodes and starting new nodes to substitute them.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.repair_cluster_v2_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:param ClusterRepairRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repair_cluster_v2" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `repair_cluster_v2`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/stacks/{name}/manualrepair', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def retry_stack(self, name, **kwargs):
"""
retry stack and cluster provisioning of failed stack
Failed or interrupted stack and cluster operations can be retried, after the cause of the failure was eliminated. The operations will continue at the state, where the previous process failed.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.retry_stack(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.retry_stack_with_http_info(name, **kwargs)
else:
(data) = self.retry_stack_with_http_info(name, **kwargs)
return data
def retry_stack_with_http_info(self, name, **kwargs):
"""
retry stack and cluster provisioning of failed stack
Failed or interrupted stack and cluster operations can be retried, after the cause of the failure was eliminated. The operations will continue at the state, where the previous process failed.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.retry_stack_with_http_info(name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method retry_stack" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `retry_stack`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/stacks/{name}/retry', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def status_stack_v2(self, id, **kwargs):
"""
retrieve stack status by stack id
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.status_stack_v2(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.status_stack_v2_with_http_info(id, **kwargs)
else:
(data) = self.status_stack_v2_with_http_info(id, **kwargs)
return data
def status_stack_v2_with_http_info(self, id, **kwargs):
"""
retrieve stack status by stack id
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.status_stack_v2_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method status_stack_v2" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `status_stack_v2`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/stacks/{id}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def validate_stack_v2(self, **kwargs):
"""
validate stack
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.validate_stack_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StackValidationRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.validate_stack_v2_with_http_info(**kwargs)
else:
(data) = self.validate_stack_v2_with_http_info(**kwargs)
return data
def validate_stack_v2_with_http_info(self, **kwargs):
"""
validate stack
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.validate_stack_v2_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param StackValidationRequest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method validate_stack_v2" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/stacks/validate', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def variants_stack_v2(self, **kwargs):
"""
retrieve available platform variants
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.variants_stack_v2(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: PlatformVariantsJson
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.variants_stack_v2_with_http_info(**kwargs)
else:
(data) = self.variants_stack_v2_with_http_info(**kwargs)
return data
def variants_stack_v2_with_http_info(self, **kwargs):
"""
retrieve available platform variants
Stacks are template instances - a running cloud infrastructure created based on a template. Stacks are always launched on behalf of a cloud user account. Stacks support a wide range of resources, allowing you to build a highly available, reliable, and scalable infrastructure for your application needs.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.variants_stack_v2_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: PlatformVariantsJson
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method variants_stack_v2" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/v2/stacks/platformVariants', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PlatformVariantsJson',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 45.436058
| 984
| 0.585043
|
339a56287c98095d3ef95540c4e0c671867baae0
| 32,839
|
py
|
Python
|
google/cloud/compute_v1/services/region_network_endpoint_groups/client.py
|
auphofBSF/python-compute
|
c81bfa752c9db93edd0cd56fec3a79599704d792
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/region_network_endpoint_groups/client.py
|
auphofBSF/python-compute
|
c81bfa752c9db93edd0cd56fec3a79599704d792
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/region_network_endpoint_groups/client.py
|
auphofBSF/python-compute
|
c81bfa752c9db93edd0cd56fec3a79599704d792
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.services.region_network_endpoint_groups import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import RegionNetworkEndpointGroupsTransport, DEFAULT_CLIENT_INFO
from .transports.rest import RegionNetworkEndpointGroupsRestTransport
class RegionNetworkEndpointGroupsClientMeta(type):
    """Metaclass for the RegionNetworkEndpointGroups client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of available transports, keyed by label. OrderedDict so the
    # first-registered transport is the deterministic default.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[RegionNetworkEndpointGroupsTransport]]
    _transport_registry["rest"] = RegionNetworkEndpointGroupsRestTransport

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[RegionNetworkEndpointGroupsTransport]:
        """Return an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # An explicitly requested transport wins; KeyError on unknown labels.
        if label:
            return cls._transport_registry[label]
        # No transport requested: fall back to the first registered one.
        return next(iter(cls._transport_registry.values()))
class RegionNetworkEndpointGroupsClient(
metaclass=RegionNetworkEndpointGroupsClientMeta
):
"""The RegionNetworkEndpointGroups API."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "compute.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RegionNetworkEndpointGroupsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
RegionNetworkEndpointGroupsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> RegionNetworkEndpointGroupsTransport:
        """Return the transport used by the client instance.

        Returns:
            RegionNetworkEndpointGroupsTransport: The transport used by the client instance.
        """
        # Set once in __init__ and never replaced for the client's lifetime.
        return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, RegionNetworkEndpointGroupsTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the region network endpoint groups client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, RegionNetworkEndpointGroupsTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a plain dict or None.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        # NOTE(review): util.strtobool raises ValueError for values it does not
        # recognize, so a malformed GOOGLE_API_USE_CLIENT_CERTIFICATE will
        # surface here rather than being treated as "false".
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )
        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # An explicitly supplied cert source takes priority over the
            # environment's default client certificate.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                client_cert_source_func = (
                    mtls.default_client_cert_source() if is_mtls else None
                )
        # Figure out which api endpoint to use.
        # Precedence: explicit client_options.api_endpoint, then the
        # GOOGLE_API_USE_MTLS_ENDPOINT environment variable ("auto" default).
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, RegionNetworkEndpointGroupsTransport):
            # transport is a RegionNetworkEndpointGroupsTransport instance.
            # A ready-made transport already carries credentials/scopes, so
            # supplying them here as well would be ambiguous.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its scopes directly."
                )
            self._transport = transport
        else:
            # Resolve the transport class via the metaclass registry
            # (transport may be a label string or None for the default).
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
            )
def delete(
self,
request: compute.DeleteRegionNetworkEndpointGroupRequest = None,
*,
project: str = None,
region: str = None,
network_endpoint_group: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Deletes the specified network endpoint group. Note
that the NEG cannot be deleted if it is configured as a
backend of a backend service.
Args:
request (google.cloud.compute_v1.types.DeleteRegionNetworkEndpointGroupRequest):
The request object. A request message for
RegionNetworkEndpointGroups.Delete. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
The name of the region where the
network endpoint group is located. It
should comply with RFC1035.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
network_endpoint_group (str):
The name of the network endpoint
group to delete. It should comply with
RFC1035.
This corresponds to the ``network_endpoint_group`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region, network_endpoint_group])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.DeleteRegionNetworkEndpointGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.DeleteRegionNetworkEndpointGroupRequest):
request = compute.DeleteRegionNetworkEndpointGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if network_endpoint_group is not None:
request.network_endpoint_group = network_endpoint_group
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get(
self,
request: compute.GetRegionNetworkEndpointGroupRequest = None,
*,
project: str = None,
region: str = None,
network_endpoint_group: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.NetworkEndpointGroup:
r"""Returns the specified network endpoint group. Gets a
list of available network endpoint groups by making a
list() request.
Args:
request (google.cloud.compute_v1.types.GetRegionNetworkEndpointGroupRequest):
The request object. A request message for
RegionNetworkEndpointGroups.Get. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
The name of the region where the
network endpoint group is located. It
should comply with RFC1035.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
network_endpoint_group (str):
The name of the network endpoint
group. It should comply with RFC1035.
This corresponds to the ``network_endpoint_group`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.NetworkEndpointGroup:
Represents a collection of network endpoints.
A network endpoint group (NEG) defines how a set of
endpoints should be reached, whether they are
reachable, and where they are located. For more
information about using NEGs, see Setting up external
HTTP(S) Load Balancing with internet NEGs, Setting up
zonal NEGs, or Setting up external HTTP(S) Load
Balancing with serverless NEGs. (== resource_for
{$api_version}.networkEndpointGroups ==) (==
resource_for
{$api_version}.globalNetworkEndpointGroups ==) (==
resource_for
{$api_version}.regionNetworkEndpointGroups ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region, network_endpoint_group])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.GetRegionNetworkEndpointGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.GetRegionNetworkEndpointGroupRequest):
request = compute.GetRegionNetworkEndpointGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if network_endpoint_group is not None:
request.network_endpoint_group = network_endpoint_group
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def insert(
self,
request: compute.InsertRegionNetworkEndpointGroupRequest = None,
*,
project: str = None,
region: str = None,
network_endpoint_group_resource: compute.NetworkEndpointGroup = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Creates a network endpoint group in the specified
project using the parameters that are included in the
request.
Args:
request (google.cloud.compute_v1.types.InsertRegionNetworkEndpointGroupRequest):
The request object. A request message for
RegionNetworkEndpointGroups.Insert. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
The name of the region where you want
to create the network endpoint group. It
should comply with RFC1035.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
network_endpoint_group_resource (google.cloud.compute_v1.types.NetworkEndpointGroup):
The body resource for this request
This corresponds to the ``network_endpoint_group_resource`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.types.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- [Global](/compute/docs/reference/rest/{$api_version}/globalOperations)
\*
[Regional](/compute/docs/reference/rest/{$api_version}/regionOperations)
\*
[Zonal](/compute/docs/reference/rest/{$api_version}/zoneOperations)
You can use an operation resource to manage
asynchronous API requests. For more information, read
Handling API responses.
Operations can be global, regional or zonal. - For
global operations, use the globalOperations resource.
- For regional operations, use the regionOperations
resource. - For zonal operations, use the
zonalOperations resource.
For more information, read Global, Regional, and
Zonal Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region, network_endpoint_group_resource])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.InsertRegionNetworkEndpointGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.InsertRegionNetworkEndpointGroupRequest):
request = compute.InsertRegionNetworkEndpointGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
if network_endpoint_group_resource is not None:
request.network_endpoint_group_resource = (
network_endpoint_group_resource
)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.insert]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list(
self,
request: compute.ListRegionNetworkEndpointGroupsRequest = None,
*,
project: str = None,
region: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPager:
r"""Retrieves the list of regional network endpoint
groups available to the specified project in the given
region.
Args:
request (google.cloud.compute_v1.types.ListRegionNetworkEndpointGroupsRequest):
The request object. A request message for
RegionNetworkEndpointGroups.List. See the method
description for details.
project (str):
Project ID for this request.
This corresponds to the ``project`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
region (str):
The name of the region where the
network endpoint group is located. It
should comply with RFC1035.
This corresponds to the ``region`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.compute_v1.services.region_network_endpoint_groups.pagers.ListPager:
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project, region])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a compute.ListRegionNetworkEndpointGroupsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, compute.ListRegionNetworkEndpointGroupsRequest):
request = compute.ListRegionNetworkEndpointGroupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project is not None:
request.project = project
if region is not None:
request.region = region
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list]
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
# Attach the installed package version to outgoing client metadata.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
    )
except pkg_resources.DistributionNotFound:
    # Fall back to an unversioned ClientInfo when the distribution is not
    # installed (e.g. when running from a source checkout).
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API of this module.
__all__ = ("RegionNetworkEndpointGroupsClient",)
| 44.197847
| 106
| 0.624745
|
c6b0ad6e7dd63ae541b518cf97cd025d397247ac
| 10,860
|
py
|
Python
|
lib/model/config.py
|
Scofieldtangsiwei/small_object_detection
|
fb86b06f3301f9e1f9d66fd4caf662750ac9a44e
|
[
"MIT"
] | 3
|
2018-11-07T22:49:30.000Z
|
2019-06-22T06:23:46.000Z
|
lib/model/config.py
|
Scofieldtangsiwei/small_object_detection
|
fb86b06f3301f9e1f9d66fd4caf662750ac9a44e
|
[
"MIT"
] | null | null | null |
lib/model/config.py
|
Scofieldtangsiwei/small_object_detection
|
fb86b06f3301f9e1f9d66fd4caf662750ac9a44e
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
# Global configuration object for the Faster R-CNN style detector.
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0001
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 10
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
# NOTE(review): two scale/max-size pairs (_1/_2) are kept — presumably one per
# input resolution used by this small-object detector; confirm against the
# data layer before relying on either.
__C.TRAIN.SCALES_1 = (1600,)
__C.TRAIN.SCALES_2 = (800,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE_1 = 2000
__C.TRAIN.MAX_SIZE_2 = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES_1 = (1600,)
__C.TEST.SCALES_2 = (800,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 2000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
# __C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the bottom 5 of 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default pooling mode, only 'crop' is available
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8,16,32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5,1,2]
# Number of filters for the RPN layer
__C.RPN_CHANNELS = 512
def get_output_dir(imdb, weights_filename):
    """Return the directory where experimental artifacts are placed.

    If the directory does not exist, it is created.

    A canonical path is built using the name from an imdb and a network
    (if not None):
    <ROOT_DIR>/output/<EXP_DIR>/<imdb.name>/<weights_filename>
    """
    if weights_filename is None:
        weights_filename = 'default'
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR,
                                  imdb.name, weights_filename))
    # exist_ok avoids the TOCTOU race of the old exists()+makedirs() pattern,
    # which could raise if another process created the directory in between.
    os.makedirs(outdir, exist_ok=True)
    return outdir
def get_output_tb_dir(imdb, weights_filename):
    """Return the directory where tensorflow summaries are placed.

    If the directory does not exist, it is created.

    A canonical path is built using the name from an imdb and a network
    (if not None):
    <ROOT_DIR>/tensorboard/<EXP_DIR>/<imdb.name>/<weights_filename>
    """
    if weights_filename is None:
        weights_filename = 'default'
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR,
                                  imdb.name, weights_filename))
    # exist_ok avoids the TOCTOU race of the old exists()+makedirs() pattern,
    # which could raise if another process created the directory in between.
    os.makedirs(outdir, exist_ok=True)
    return outdir
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Raises:
        KeyError: if a contains a key not present in b.
        ValueError: if a value's type differs from the existing one in b
            (lists/tuples are coerced when b holds an ndarray).
    """
    if type(a) is not edict:
        return

    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))

        # The types must match, too; sequences may be coerced into an
        # existing ndarray of the same dtype.
        old_type = type(b[k])
        if old_type is not type(v):
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                               type(v), k))

        # Recursively merge nested config dicts.
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except Exception:
                # Narrowed from a bare `except:` (which also intercepted
                # KeyboardInterrupt/SystemExit); print the offending key for
                # easier debugging, then re-raise the original error.
                print(('Error under config key: {}'.format(k)))
                raise
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a YAML config file and merge it into the default options."""
    import yaml

    with open(filename, 'r') as f:
        # safe_load restricts the YAML to plain data types; yaml.load without
        # an explicit Loader can construct arbitrary Python objects from the
        # file and is deprecated/unsafe.
        yaml_cfg = edict(yaml.safe_load(f))

    _merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line).

    cfg_list alternates dotted key paths and values:
    ['TRAIN.SCALES_1', '(1200,)', 'EXP_DIR', 'myexp', ...]
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        # Walk down to the sub-dict that owns the final key.
        for subkey in key_list[:-1]:
            assert subkey in d
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # v is a plain string literal (e.g. 'res101'); keep it as-is.
            # Catching only literal_eval's failure modes avoids masking
            # unrelated errors that the old bare `except:` swallowed.
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(
                type(value), type(d[subkey]))
        d[subkey] = value
| 27.917738
| 91
| 0.72523
|
60e049376c0a9c2f5a535634373514585f0ca708
| 5,176
|
py
|
Python
|
docs/conf.py
|
ejhigson/perfectns
|
f20339281bfa45797fcd7815ed3aeab4101d9e95
|
[
"MIT"
] | 4
|
2018-06-08T02:16:38.000Z
|
2018-11-02T13:44:06.000Z
|
docs/conf.py
|
ejhigson/PerfectNS
|
f20339281bfa45797fcd7815ed3aeab4101d9e95
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
ejhigson/PerfectNS
|
f20339281bfa45797fcd7815ed3aeab4101d9e95
|
[
"MIT"
] | 2
|
2018-10-12T17:34:23.000Z
|
2020-06-19T20:55:54.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'perfectns'
copyright = '2018-Present Edward Higson and contributors (MIT license).'
author = 'Edward Higson'
# The short X.Y version
# NOTE(review): deliberately left empty here, so no version string is shown —
# confirm whether it should track the package version.
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'numpydoc',
    'nbsphinx'
]
# nbspinx options
nbsphinx_execute = 'never'  # use stored output of notebook so data not needed
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'perfectnsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'perfectns.tex', 'perfectns Documentation',
     'Edward Higson', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'perfectns', 'perfectns Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'perfectns', 'perfectns Documentation',
     author, 'perfectns', 'Perfect nested sampling.',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| 30.447059
| 79
| 0.651662
|
925dc4ea75970c8b41bdf2dae908d99b15095ff5
| 7,093
|
py
|
Python
|
src/runtime/crt/host/microtvm_api_server.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 4,640
|
2017-08-17T19:22:15.000Z
|
2019-11-04T15:29:46.000Z
|
src/runtime/crt/host/microtvm_api_server.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 3,022
|
2020-11-24T14:02:31.000Z
|
2022-03-31T23:55:31.000Z
|
src/runtime/crt/host/microtvm_api_server.py
|
XiaoSong9905/tvm
|
48940f697e15d5b50fa1f032003e6c700ae1e423
|
[
"Apache-2.0"
] | 1,352
|
2017-08-17T19:30:38.000Z
|
2019-11-04T16:09:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import fcntl
import os
import os.path
import pathlib
import select
import shutil
import subprocess
import tarfile
import time
from tvm.micro.project_api import server
# Directory containing this script. os.path.dirname(__file__) is "" when the
# script is run from its own directory, so fall back to the current working
# directory. NOTE: the original called os.path.getcwd(), which does not exist
# (it is os.getcwd()) and would raise AttributeError in that case.
PROJECT_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())

# Filename of the Model Library Format tarball placed into generated projects.
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"

# True when this script lives in the template project (no model tarball next to
# it); False inside a generated project, where generate_project copied one in.
IS_TEMPLATE = not os.path.exists(os.path.join(PROJECT_DIR, MODEL_LIBRARY_FORMAT_RELPATH))
class Handler(server.ProjectAPIHandler):
    """Project API handler for the CRT "host" platform.

    Generates a standalone project from this template, builds it with make,
    and runs the resulting binary as a subprocess, using the subprocess's
    stdin/stdout pipes as the microTVM transport.
    """

    # These files and directories will be recursively copied into generated
    # projects from the CRT.
    CRT_COPY_ITEMS = ("include", "Makefile", "src")

    # The build target given to make; also the path of the binary launched by
    # open_transport. NOTE: the original defined this attribute twice with the
    # same value; the duplicate definition has been removed.
    BUILD_TARGET = "build/main"

    def __init__(self):
        super(Handler, self).__init__()
        # Popen handle of the running project binary; None when no transport
        # session is open.
        self._proc = None

    def server_info_query(self, tvm_version):
        """Describe this platform to the Project API client.

        Args:
            tvm_version: version string of the querying TVM (unused here).

        Returns:
            server.ServerInfo advertising the platform name, whether this is
            the template project, and the supported project options.
        """
        return server.ServerInfo(
            platform_name="host",
            is_template=IS_TEMPLATE,
            model_library_format_path=""
            if IS_TEMPLATE
            else PROJECT_DIR / MODEL_LIBRARY_FORMAT_RELPATH,
            project_options=[
                server.ProjectOption(
                    "verbose",
                    optional=["build"],
                    type="bool",
                    help="Run make with verbose output",
                )
            ],
        )

    def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
        """Generate a standalone, buildable project under project_dir.

        Args:
            model_library_format_path: path of the Model Library Format tarball.
            standalone_crt_dir: path of the standalone CRT sources to copy in.
            project_dir: pathlib.Path of the (not-yet-existing) project root.
            options: project options dict (unused by this platform).
        """
        # Make project directory.
        project_dir.mkdir(parents=True)
        # Copy ourselves to the generated project. TVM may perform further
        # build steps on the generated project by launching the copy.
        shutil.copy2(__file__, project_dir / os.path.basename(__file__))
        # Place Model Library Format tarball in the special location, which this
        # script uses to decide whether it's being invoked in a template or
        # generated project.
        project_model_library_format_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
        shutil.copy2(model_library_format_path, project_model_library_format_path)
        # Extract Model Library Format tarball into <project_dir>/model.
        # NOTE(review): the tarball is produced by TVM itself, so extractall on
        # it is assumed safe; do not reuse this for untrusted archives.
        extract_path = project_dir / project_model_library_format_path.stem
        with tarfile.TarFile(project_model_library_format_path) as tf:
            os.makedirs(extract_path)
            tf.extractall(path=extract_path)
        # Populate CRT.
        crt_path = project_dir / "crt"
        os.mkdir(crt_path)
        for item in self.CRT_COPY_ITEMS:
            src_path = standalone_crt_dir / item
            dst_path = crt_path / item
            if os.path.isdir(src_path):
                shutil.copytree(src_path, dst_path)
            else:
                shutil.copy2(src_path, dst_path)
        # Populate Makefile.
        shutil.copy2(pathlib.Path(__file__).parent / "Makefile", project_dir / "Makefile")
        # Populate crt_config/crt_config.h from the template next to this script.
        crt_config_dir = project_dir / "crt_config"
        crt_config_dir.mkdir()
        shutil.copy2(
            os.path.join(os.path.dirname(__file__), "..", "crt_config-template.h"),
            os.path.join(crt_config_dir, "crt_config.h"),
        )
        # Populate src/ with the project entry point.
        src_dir = os.path.join(project_dir, "src")
        os.mkdir(src_dir)
        shutil.copy2(
            os.path.join(os.path.dirname(__file__), "main.cc"), os.path.join(src_dir, "main.cc")
        )

    def build(self, options):
        """Build BUILD_TARGET with make, honoring the "verbose" option."""
        args = ["make"]
        if options.get("verbose"):
            args.append("VERBOSE=1")
        args.append(self.BUILD_TARGET)
        subprocess.check_call(args, cwd=PROJECT_DIR)

    def flash(self, options):
        """No-op: the "device" is the host, so there is nothing to flash."""
        pass  # Flashing does nothing on host.

    def _set_nonblock(self, fd):
        """Put file descriptor fd into non-blocking mode."""
        flag = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
        new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
        # f-prefix added: the original message was a plain string, so {fd} was
        # never interpolated.
        assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"

    def open_transport(self, options):
        """Launch the built binary and use its stdio pipes as the transport."""
        self._proc = subprocess.Popen(
            [self.BUILD_TARGET], stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=0
        )
        self._set_nonblock(self._proc.stdin.fileno())
        self._set_nonblock(self._proc.stdout.fileno())
        # All timeouts 0: the host subprocess is available immediately.
        return server.TransportTimeouts(
            session_start_retry_timeout_sec=0,
            session_start_timeout_sec=0,
            session_established_timeout_sec=0,
        )

    def close_transport(self):
        """Terminate the subprocess, if one is running, and reap it."""
        if self._proc is not None:
            proc = self._proc
            self._proc = None
            proc.terminate()
            proc.wait()

    def _await_ready(self, rlist, wlist, timeout_sec=None, end_time=None):
        """Block until an fd in rlist/wlist is ready.

        When timeout_sec is not given, it is derived from end_time (a
        time.monotonic() deadline). Raises server.IoTimeoutError on timeout.
        """
        if timeout_sec is None and end_time is not None:
            timeout_sec = max(0, end_time - time.monotonic())
        rlist, wlist, xlist = select.select(rlist, wlist, rlist + wlist, timeout_sec)
        if not rlist and not wlist and not xlist:
            raise server.IoTimeoutError()
        return True

    def read_transport(self, n, timeout_sec):
        """Read up to n bytes from the subprocess's stdout.

        Raises:
            server.TransportClosedError: transport not open, or peer hung up.
            server.IoTimeoutError: no data within timeout_sec.
        """
        if self._proc is None:
            raise server.TransportClosedError()
        fd = self._proc.stdout.fileno()
        end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
        try:
            self._await_ready([fd], [], end_time=end_time)
            to_return = os.read(fd, n)
        except BrokenPipeError:
            to_return = 0
        if not to_return:
            # Peer hung up: tear down the transport. The original called
            # self.disconnect_transport(), which is not defined on this class
            # and would raise AttributeError here.
            self.close_transport()
            raise server.TransportClosedError()
        return to_return

    def write_transport(self, data, timeout_sec):
        """Write all of data to the subprocess's stdin.

        Raises the same exceptions as read_transport.
        """
        if self._proc is None:
            raise server.TransportClosedError()
        fd = self._proc.stdin.fileno()
        end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
        while data:
            self._await_ready([], [fd], end_time=end_time)
            try:
                num_written = os.write(fd, data)
            except BrokenPipeError:
                num_written = 0
            if not num_written:
                self.close_transport()
                raise server.TransportClosedError()
            data = data[num_written:]
if __name__ == "__main__":
server.main(Handler())
| 34.100962
| 111
| 0.642887
|
5e41eaf695e0f150d71efc0d4c260e387c7a1929
| 2,030
|
py
|
Python
|
code-docs/conf.py
|
acope3/riboviz
|
03a4f13b2d833b8650ebf33bdce81fe2639eb9cf
|
[
"Apache-2.0"
] | null | null | null |
code-docs/conf.py
|
acope3/riboviz
|
03a4f13b2d833b8650ebf33bdce81fe2639eb9cf
|
[
"Apache-2.0"
] | null | null | null |
code-docs/conf.py
|
acope3/riboviz
|
03a4f13b2d833b8650ebf33bdce81fe2639eb9cf
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the riboviz package importable so sphinx.ext.autodoc can load it.
sys.path.insert(0, os.path.abspath('../../riboviz'))
# -- Project information -----------------------------------------------------
project = 'RiboViz'
copyright = '2020, The University of Edinburgh; Rutgers University; University of California, Berkeley'
author = 'The University of Edinburgh; Rutgers University; University of California, Berkeley'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 37.592593
| 103
| 0.674877
|
44f24401fdbc92da285693e3b36cb684f25ddbeb
| 559
|
py
|
Python
|
cogs/lenny.py
|
Itai12/Amanager
|
a470f92a645115ad51adaec4f517dd4d00b2c05e
|
[
"Apache-2.0"
] | null | null | null |
cogs/lenny.py
|
Itai12/Amanager
|
a470f92a645115ad51adaec4f517dd4d00b2c05e
|
[
"Apache-2.0"
] | null | null | null |
cogs/lenny.py
|
Itai12/Amanager
|
a470f92a645115ad51adaec4f517dd4d00b2c05e
|
[
"Apache-2.0"
] | null | null | null |
import discord, random, json
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
class Slash(commands.Cog):
    """Cog exposing the /lenny slash command."""

    def __init__(self, bot):
        self.bot = bot

    @cog_ext.cog_slash(name="lenny", description="Envoyer un lenny !")
    async def _lenny(self, ctx):
        """Reply with a random lenny face read from no-move.json."""
        # Context manager ensures the file handle is closed even if json.load
        # raises; the original left it open on error.
        with open("no-move.json", "r") as a_file:
            json_object_nm = json.load(a_file)
        await ctx.send(random.choice(json_object_nm['lenny']))
def setup(bot):
    """Extension entry point: register the Slash cog on the bot."""
    cog = Slash(bot)
    bot.add_cog(cog)
def teardown(bot):
    # Extension unload hook.
    # NOTE(review): Bot.remove_cog takes the cog's name, and the cog class in
    # this file is "Slash" — verify that "lenny" actually matches the name the
    # cog was registered under, otherwise this removal is a silent no-op.
    bot.remove_cog("lenny")
| 27.95
| 70
| 0.674419
|
594d3ada041ff0579658431723cb6d60c91e8065
| 3,732
|
py
|
Python
|
cpovc_registry/admin.py
|
Rebeccacheptoek/cpims-ovc-3.0
|
25d34dca2f93fcdb6fc934093b625604b46ddd8d
|
[
"Apache-2.0"
] | 3
|
2022-02-18T13:25:29.000Z
|
2022-02-25T11:49:11.000Z
|
cpovc_registry/admin.py
|
Rebeccacheptoek/cpims-ovc-3.0
|
25d34dca2f93fcdb6fc934093b625604b46ddd8d
|
[
"Apache-2.0"
] | null | null | null |
cpovc_registry/admin.py
|
Rebeccacheptoek/cpims-ovc-3.0
|
25d34dca2f93fcdb6fc934093b625604b46ddd8d
|
[
"Apache-2.0"
] | 22
|
2022-02-05T13:43:53.000Z
|
2022-02-26T14:29:06.000Z
|
"""Admin backend for editing some admin details."""
import csv
import time
from django.contrib import admin
from django.http import HttpResponse
from .models import (RegPerson, RegOrgUnit, RegOrgUnitsAuditTrail,
RegPersonsAuditTrail, RegPersonsTypes)
from cpovc_auth.models import AppUser
def dump_to_csv(modeladmin, request, qs):
    """
    Admin action: dump a Django queryset to a downloadable CSV file.

    Generic for any model: the header row comes from the model's fields and
    one row is written per object.

    Args:
        modeladmin: the ModelAdmin the action was invoked from (unused).
        request: the triggering HttpRequest (unused).
        qs: the selected queryset.

    Returns:
        HttpResponse carrying a ``text/csv`` attachment.
    """
    model = qs.model
    file_id = 'CPIMS_%s_%d' % (model.__name__, int(time.time()))
    file_name = 'attachment; filename=%s.csv' % (file_id)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = file_name
    writer = csv.writer(response, csv.excel)
    headers = [field.name for field in model._meta.fields]
    writer.writerow(headers)
    for obj in qs:
        row = []
        for field in headers:
            val = getattr(obj, field)
            if callable(val):
                val = val()
            # The original compared against the Python 2-only ``unicode`` type
            # (NameError on Python 3) and encoded to UTF-8 bytes. Python 3's
            # csv writer expects text, so decode any bytes values instead.
            if isinstance(val, bytes):
                val = val.decode('utf-8')
            row.append(val)
        writer.writerow(row)
    return response


dump_to_csv.short_description = u"Dump to CSV"
class PersonInline(admin.StackedInline):
    """Inline AppUser (login account) editor embedded in the person admin."""
    model = AppUser
    # Never expose the (hashed) password field in the inline form.
    exclude = ('password', )
class RegPersonAdmin(admin.ModelAdmin):
    """Admin for RegPerson (registered persons), with inline AppUser editing."""
    search_fields = ['first_name', 'surname', 'other_names']
    list_display = ['id', 'first_name', 'surname', 'date_of_birth',
                    'age', 'sex_id', 'is_void']
    # readonly_fields = ['id']
    list_filter = ['is_void', 'sex_id', 'created_at']
    inlines = (PersonInline, )


admin.site.register(RegPerson, RegPersonAdmin)
class RegPersonTypesAdmin(admin.ModelAdmin):
    """Admin for RegPersonsTypes (person-to-type assignments)."""
    search_fields = ['person__surname', 'person__first_name']
    list_display = ['id', 'person', 'person_type_id',
                    'date_created', 'is_void', ]

    def date_created(self, obj):
        # Computed column: surface the related person's creation timestamp.
        return obj.person.created_at
    # NOTE(review): 'date' is not an obvious field name on this model — verify
    # the intended ordering field (possibly 'person__created_at').
    date_created.admin_order_field = 'date'
    date_created.short_description = 'Date Created'
    readonly_fields = ['person']
    list_filter = ['is_void', 'person_type_id', 'person__created_at']


admin.site.register(RegPersonsTypes, RegPersonTypesAdmin)
class RegOrgUnitAdmin(admin.ModelAdmin):
    """Admin for RegOrgUnit (organisational units), with CSV dump action."""
    search_fields = ['org_unit_name', 'org_unit_id_vis']
    list_display = ['id', 'org_unit_id_vis', 'org_unit_name',
                    'parent_org_unit_id', 'parent_unit', 'is_void']
    # readonly_fields = ['id']
    list_filter = ['is_void', 'org_unit_type_id', 'created_at',
                   'parent_org_unit_id']
    actions = [dump_to_csv]


admin.site.register(RegOrgUnit, RegOrgUnitAdmin)
class OrgUnitAuditAdmin(admin.ModelAdmin):
    """Admin for the org-unit audit trail (RegOrgUnitsAuditTrail)."""
    search_fields = ['org_unit_id']
    list_display = ['transaction_id', 'transaction_type_id', 'ip_address',
                    'app_user_id', 'timestamp_modified']
    # readonly_fields = ['id']
    list_filter = ['transaction_type_id', 'app_user_id']


admin.site.register(RegOrgUnitsAuditTrail, OrgUnitAuditAdmin)
class PersonsAuditAdmin(admin.ModelAdmin):
    """Admin for the persons audit trail (RegPersonsAuditTrail)."""
    search_fields = ['person_id']
    list_display = ['transaction_id', 'transaction_type_id', 'ip_address',
                    'app_user_id', 'timestamp_modified']
    # readonly_fields = ['id']
    list_filter = ['transaction_type_id', 'app_user_id']


admin.site.register(RegPersonsAuditTrail, PersonsAuditAdmin)
| 30.842975
| 75
| 0.640675
|
d05af9259ff4186e495475edba976dfa6e26c77b
| 519
|
py
|
Python
|
runtests.py
|
Kreios-S-A-R-L/django-languages-plus
|
d1d3e52468b4b010634cc0b531ebc34c035c30f1
|
[
"MIT"
] | null | null | null |
runtests.py
|
Kreios-S-A-R-L/django-languages-plus
|
d1d3e52468b4b010634cc0b531ebc34c035c30f1
|
[
"MIT"
] | null | null | null |
runtests.py
|
Kreios-S-A-R-L/django-languages-plus
|
d1d3e52468b4b010634cc0b531ebc34c035c30f1
|
[
"MIT"
] | 1
|
2021-11-10T16:09:40.000Z
|
2021-11-10T16:09:40.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def run_tests(*test_args):
    """Run the Django test suite and exit with a nonzero status on failure.

    Defaults to the 'tests' label when no labels are given.
    """
    labels = list(test_args) or ['tests']
    os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
    django.setup()
    runner_cls = get_runner(settings)
    runner = runner_cls()
    failures = runner.run_tests(labels)
    sys.exit(bool(failures))


if __name__ == '__main__':
    run_tests(*sys.argv[1:])
| 19.222222
| 59
| 0.695568
|
86e19f9d7bb0d43941cb4c24f497d6f90dbbd62f
| 2,005
|
py
|
Python
|
conduit/settings.py
|
PierreDarcas/2020-2021-devops-final-back
|
b0da5cb36ec3626b0f2f1ce467b2fa4e8e28125c
|
[
"MIT"
] | null | null | null |
conduit/settings.py
|
PierreDarcas/2020-2021-devops-final-back
|
b0da5cb36ec3626b0f2f1ce467b2fa4e8e28125c
|
[
"MIT"
] | 2
|
2020-12-03T08:45:59.000Z
|
2021-01-18T08:40:58.000Z
|
conduit/settings.py
|
PierreDarcas/2020-2021-devops-final-back
|
b0da5cb36ec3626b0f2f1ce467b2fa4e8e28125c
|
[
"MIT"
] | 1
|
2021-01-17T23:05:51.000Z
|
2021-01-17T23:05:51.000Z
|
# -*- coding: utf-8 -*-
"""Application configuration."""
import os
from datetime import timedelta
class Config(object):
    """Base configuration shared by all environments."""
    SECRET_KEY = os.environ.get('CONDUIT_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    # bcrypt work factor for password hashing.
    BCRYPT_LOG_ROUNDS = 13
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Users authenticate with their e-mail; JWTs use "Authorization: Token ...".
    JWT_AUTH_USERNAME_KEY = 'email'
    JWT_AUTH_HEADER_PREFIX = 'Token'
    # Origins allowed by CORS: local dev servers plus the deployed frontend.
    CORS_ORIGIN_WHITELIST = [
        'http://0.0.0.0:4100',
        'http://localhost:4100',
        'http://0.0.0.0:8000',
        'http://localhost:8000',
        'http://0.0.0.0:4200',
        'http://localhost:4200',
        'http://0.0.0.0:4000',
        'http://localhost:4000',
        'https://super-cool-site-by-pierredarcas.netlify.app',
        'http://super-cool-site-by-pierredarcas.netlify.app',
    ]
    JWT_HEADER_TYPE = 'Token'
class ProdConfig(Config):
    """Production configuration: debug off, database from DATABASE_URL."""
    ENV = 'prod'
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL',
                                             'postgresql://localhost/example')
class DevConfig(Config):
    """Development configuration: debug on, effectively non-expiring JWTs."""
    ENV = 'dev'
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL',
                                             'postgresql://localhost/example')
    # Redundant: same value as the base Config.
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    # timedelta's first argument is days, so tokens last ~10^6 days (~2700 yrs).
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(10 ** 6)
class TestConfig(Config):
    """Test configuration: testing flags on, cheap password hashing."""
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL',
                                             'postgresql://localhost/example')
    # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
    BCRYPT_LOG_ROUNDS = 4
| 31.825397
| 82
| 0.612469
|
f6fc2adc18b30fae772d5091c928cd0a093f6a76
| 9,960
|
py
|
Python
|
libai/evaluation/evaluator.py
|
Kitten97/libai
|
4ca0341628dc0b850acbc30d3766797cfc8a6c86
|
[
"Apache-2.0"
] | null | null | null |
libai/evaluation/evaluator.py
|
Kitten97/libai
|
4ca0341628dc0b850acbc30d3766797cfc8a6c86
|
[
"Apache-2.0"
] | null | null | null |
libai/evaluation/evaluator.py
|
Kitten97/libai
|
4ca0341628dc0b850acbc30d3766797cfc8a6c86
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import time
from collections import OrderedDict, abc
from contextlib import ExitStack, contextmanager
from typing import Callable, List, Union
import oneflow as flow
from libai.utils import distributed as dist
from libai.utils.logger import log_every_n_seconds
from .utils import pad_batch
class DatasetEvaluator:
    """
    Base class for a dataset evaluator.
    The function :func:`inference_on_dataset` runs the model over
    all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.
    This class will accumulate information of the inputs/outputs (by :meth:`process`),
    and produce evaluation results in the end (by :meth:`evaluate`).
    """

    def reset(self):
        """
        Preparation for a new round of evaluation.
        Should be called before starting a round of evaluation.
        """
        # Intentionally a no-op here; stateful subclasses clear accumulated
        # inputs/outputs in their override.

    def process(self, inputs, outputs):
        """
        Process the pair of inputs and outputs.
        .. code-block:: python
            pred_logits = outputs["prediction_scores"]
            labels = inputs["labels"]
            # do evaluation on pred_logits/labels pair
            ...
        Args:
            inputs (dict): the inputs that's used to call the model.
            outputs (dict): the return dict of `model(**inputs)`
        """
        # No-op in the base class; subclasses accumulate per-batch statistics.

    def evaluate(self):
        """
        Evaluate/summarize the performance, after processing all input/output pairs.
        Returns:
            dict:
                A new evaluator class can return a dict of arbitrary format
                as long as the user can process the results.
                In our train_net.py, we expect the following format:
                * key: the name of the task (e.g., Classification)
                * value: a dict of {metric name: score}, e.g.: {"Acc@1": 75.0}
        """
        # No-op in the base class; implicitly returns None.
class DatasetEvaluators(DatasetEvaluator):
    """Composite evaluator that fans every call out to child evaluators.

    reset/process/evaluate are each dispatched to all wrapped
    :class:`DatasetEvaluator` instances; evaluate merges their result dicts.
    """

    def __init__(self, evaluators):
        """
        Args:
            evaluators (list): the evaluators to combine.
        """
        super().__init__()
        self._evaluators = evaluators

    def reset(self):
        for child in self._evaluators:
            child.reset()

    def process(self, inputs, outputs):
        for child in self._evaluators:
            child.process(inputs, outputs)

    def evaluate(self):
        merged = OrderedDict()
        for child in self._evaluators:
            child_result = child.evaluate()
            # Only the main process merges results; non-main (or None) results
            # are skipped.
            if not (dist.is_main_process() and child_result is not None):
                continue
            for key, score in child_result.items():
                assert (
                    key not in merged
                ), "Different evaluators produce results with the same key {}".format(key)
                merged[key] = score
        return merged
def inference_on_dataset(
    model,
    data_loader,
    batch_size,
    get_batch: Callable,
    evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
):
    """
    Run model on the data_loader and evaluate the metrics with evaluator.
    Also benchmark the inference speed of `model.__call__` accurately.
    The model will be used in eval mode.
    Args:
        model (callable): a callable which takes an object from
            `data_loader` and returns some outputs.
            If it's an nn.Module, it will be temporarily set to `eval` mode.
            If you wish to evaluate a model in `training` mode instead, you can
            wrap the given model and override its behavior of `.eval()` and `.train()`.
        batch_size: batch size for inference
        data_loader: an iterable object with a length.
            The elements it generates will be the inputs to the model.
        get_batch: a Callable function for getting data from dataloader
        evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
            but don't want to do any evaluation.
    Returns:
        The return value of `evaluator.evaluate()`
    """
    num_devices = dist.get_world_size()
    logger = logging.getLogger(__name__)
    logger.info("Start inference on {} samples".format(len(data_loader.dataset)))
    total = len(data_loader.dataset)  # inference data loader must have a fixed length
    if evaluator is None:
        # create a no-op evaluator
        evaluator = DatasetEvaluators([])
    if isinstance(evaluator, abc.MutableSequence):
        evaluator = DatasetEvaluators(evaluator)
    evaluator.reset()
    # Iterations excluded from the timing statistics (warmup).
    num_warmup = min(5, total - 1)
    start_time = time.perf_counter()
    total_data_time = 0
    total_compute_time = 0
    total_eval_time = 0
    consumed_samples = 0
    # Number of extra samples needed so the last batch divides evenly across
    # data-parallel ranks; pad_batch pads by this and reports the valid count.
    dps = dist.get_data_parallel_size()
    last_batch_lack = (dps - (total % dps)) % dps
    with ExitStack() as stack:
        if isinstance(model, (flow.nn.Module, flow.nn.Graph)):
            stack.enter_context(inference_context(model))
        stack.enter_context(flow.no_grad())
        start_data_time = time.perf_counter()
        for idx, inputs in enumerate(data_loader):
            total_data_time += time.perf_counter() - start_data_time
            if idx == num_warmup:
                # Warmup done: restart timers so reported speed excludes it.
                start_time = time.perf_counter()
                total_data_time = 0
                total_compute_time = 0
                total_eval_time = 0
            start_compute_time = time.perf_counter()
            # model forward
            data = get_batch(inputs)
            is_last_batch = idx == len(data_loader) - 1
            paded_data, valid_sample = pad_batch(data, batch_size, last_batch_lack, is_last_batch)
            outputs = model(**paded_data)
            # get valid sample: slice off the padded rows added by pad_batch
            valid_data = {key: dist.ttol(value)[:valid_sample] for key, value in data.items()}
            valid_outputs = {key: dist.ttol(value)[:valid_sample] for key, value in outputs.items()}
            if flow.cuda.is_available():
                dist.synchronize()
            total_compute_time += time.perf_counter() - start_compute_time
            start_eval_time = time.perf_counter()
            # Only the main process accumulates evaluator state; all ranks then
            # meet at the synchronize() barrier below.
            if dist.is_main_process():
                evaluator.process(valid_data, valid_outputs)
            dist.synchronize()
            total_eval_time += time.perf_counter() - start_eval_time
            consumed_samples += valid_sample
            iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
            data_seconds_per_iter = total_data_time / iters_after_start
            compute_seconds_per_iter = total_compute_time / iters_after_start
            eval_seconds_per_iter = total_eval_time / iters_after_start
            total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
            if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
                eta = datetime.timedelta(
                    seconds=int(total_seconds_per_iter * (total // batch_size - idx - 1))
                )
                log_every_n_seconds(
                    logging.INFO,
                    (
                        f"Inference done {consumed_samples}/{total}. "
                        f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
                        f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
                        f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
                        f"Total: {total_seconds_per_iter:.4f} s/iter. "
                        f"ETA={eta}"
                    ),
                    n=5,
                )
            start_data_time = time.perf_counter()
    # Measure the time only for this worker (before the synchronization barrier)
    total_time = time.perf_counter() - start_time
    total_time_str = str(datetime.timedelta(seconds=total_time))
    # NOTE this format is parsed by grep
    logger.info("Total valid samples: {}".format(consumed_samples))
    logger.info(
        "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format(
            total_time_str, total_time / (total - num_warmup), num_devices
        )
    )
    total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
    logger.info(
        "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format(
            total_compute_time_str,
            total_compute_time / (total - num_warmup),
            num_devices,
        )
    )
    results = evaluator.evaluate()
    # An evaluator may return None when not in main process.
    # Replace it by an empty dict instead to make it easier for downstream code to handle
    if results is None:
        results = {}
    return results
@contextmanager
def inference_context(model):
    """
    A context where the model is temporarily changed to eval mode,
    and restored to previous mode afterwards.
    Args:
        model: eager or graph mode in oneflow
    """
    # For a flow.nn.Graph the underlying module lives at model.model; resolve
    # the target once instead of re-checking the type at every step.
    target = model.model if isinstance(model, flow.nn.Graph) else model
    previous_mode = target.training
    target.eval()
    yield
    target.train(previous_mode)
| 37.584906
| 100
| 0.629217
|
037cf130623124d4b2a02c3dd7bd73617961f7a7
| 2,810
|
py
|
Python
|
pyleecan/GUI/Dialog/DMachineSetup/SWSlot/PWSlot23/Gen_PWSlot23.py
|
Kelos-Zhu/pyleecan
|
368f8379688e31a6c26d2c1cd426f21dfbceff2a
|
[
"Apache-2.0"
] | 2
|
2019-06-08T15:04:39.000Z
|
2020-09-07T13:32:22.000Z
|
pyleecan/GUI/Dialog/DMachineSetup/SWSlot/PWSlot23/Gen_PWSlot23.py
|
lyhehehe/pyleecan
|
421e9a843bf30d796415c77dc934546adffd1cd7
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/GUI/Dialog/DMachineSetup/SWSlot/PWSlot23/Gen_PWSlot23.py
|
lyhehehe/pyleecan
|
421e9a843bf30d796415c77dc934546adffd1cd7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""File generated according to PWSlot23/gen_list.json
WARNING! All changes made in this file will be lost!
"""
from pyleecan.GUI.Dialog.DMachineSetup.SWSlot.PWSlot23.Ui_PWSlot23 import Ui_PWSlot23
class Gen_PWSlot23(Ui_PWSlot23):
    def setupUi(self, PWSlot23):
        """Abstract class to update the widget according to the csv doc
        """
        Ui_PWSlot23.setupUi(self, PWSlot23)
        # (dimension suffix, help text) — drives the tooltip/what's-this setup
        # for each in_<suffix> label and lf_<suffix> float edit below.
        field_docs = [
            ("W0", u"Slot isthmus width."),
            ("W1", u"Slot top width."),
            ("W2", u"Slot bottom width."),
            ("W3", u"Tooth width"),
            ("H0", u"Slot isthmus height."),
            ("H1", u"height or angle (See Schematics)"),
            ("H2", u"Slot height below wedge "),
        ]
        for suffix, doc in field_docs:
            txt = self.tr(doc)
            # Setup of in_<suffix> (label)
            label = getattr(self, "in_" + suffix)
            label.setWhatsThis(txt)
            label.setToolTip(txt)
            # Setup of lf_<suffix> (float edit): only non-negative values
            edit = getattr(self, "lf_" + suffix)
            edit.validator().setBottom(0)
            edit.setWhatsThis(txt)
            edit.setToolTip(txt)
| 31.573034
| 85
| 0.598221
|
6458652c02f9ace430e661bbebd2bc585d766a59
| 522
|
py
|
Python
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/shader_integer_mix.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/shader_integer_mix.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/EXT/shader_integer_mix.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
# Canonical OpenGL extension name for this module.
_EXTENSION_NAME = 'GL_EXT_shader_integer_mix'
def _f( function ):
    # Wrap an extension entry point: resolve it against the GL platform
    # implementation and attach the standard GL error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_shader_integer_mix',error_checker=_errors._error_checker)
| 32.625
| 120
| 0.772031
|
4c2ae7932733263f45acff2a82759cdd385f0309
| 6,919
|
py
|
Python
|
src/cache/update_cache.py
|
aeneasr/cloud-launcher
|
823ee95bb1cd12fa9508333269debebcb925f821
|
[
"Apache-2.0"
] | 1
|
2020-12-16T13:20:31.000Z
|
2020-12-16T13:20:31.000Z
|
src/cache/update_cache.py
|
aeneasr/cloud-launcher
|
823ee95bb1cd12fa9508333269debebcb925f821
|
[
"Apache-2.0"
] | null | null | null |
src/cache/update_cache.py
|
aeneasr/cloud-launcher
|
823ee95bb1cd12fa9508333269debebcb925f821
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Queries Google Compute Engine and updates the local cache of GCP info.
# System imports
import argparse
import httplib2
import json
import logging
import os
import re
import urlparse
import sys
# Google Cloud API imports
from googleapiclient.discovery import build
from googleapiclient import errors
from googleapiclient import http
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client import gce
from oauth2client import tools
from oauth2client.tools import run_flow
# Compute Engine API version, endpoint prefix, and OAuth scope used for all
# requests made by this script.
API_VERSION = 'v1'
GCE_URL = 'https://www.googleapis.com/compute/%s/projects/' % (API_VERSION)
GCE_SCOPE = 'https://www.googleapis.com/auth/compute'
class GceService(object):
    """Authenticated access to the Compute Engine API for one project.

    Runs the OAuth 2.0 flow using credentials stored under
    ~/cloud/projects/<project>/ and exposes:
      * self.http    -- an authorized httplib2.Http instance
      * self.compute -- the discovery-built Compute Engine API client
    """
    def __init__(self, flags):
        self.__flags = flags
        # Perform OAuth 2.0 authorization.
        # Client secrets and the cached token live in the per-project directory.
        project_dir = os.path.join(
            os.getenv('HOME'), 'cloud', 'projects', flags.project)
        client_secrets = os.path.join(project_dir, 'client_secrets.json')
        oauth2_storage = os.path.join(project_dir, 'oauth2.dat')
        flow = flow_from_clientsecrets(client_secrets, scope=GCE_SCOPE)
        storage = Storage(oauth2_storage)
        credentials = storage.get()
        # Only run the interactive browser flow when no valid cached
        # credential exists.
        if credentials is None or credentials.invalid:
            credentials = run_flow(flow, storage, flags)
        self.http = credentials.authorize(httplib2.Http())
        self.compute = build('compute', API_VERSION)
def WriteJsonToFile(json_obj, filename):
    """Write json_obj to filename as sorted, indented JSON plus a trailing newline.

    Args:
        json_obj: any JSON-serializable object.
        filename: path of the file to (over)write.
    """
    # print() call syntax works on both Python 2 and 3; the original used the
    # Python 2-only print statement.
    print('Updating %s ...' % filename)
    json_str = json.dumps(json_obj, indent=2, separators=(',', ': '), sort_keys=True)
    with open(filename, 'w') as output:
        output.write('%s\n' % json_str)
def UpdateVmImages(gce, flags):
    """Refresh the cached per-project image lists and rebuild vm_images.json.

    Downloads each image project's image list into raw_data/<project>.json,
    then derives short names and '-latest' pseudo-image aliases and writes
    the combined result to vm_images.json.
    """
    vm_images = {}
    vm_image_projects = sorted([
        'centos-cloud', 'coreos-cloud', 'debian-cloud', 'gce-nvme',
        'google-containers', 'opensuse-cloud', 'rhel-cloud', 'suse-cloud',
        'ubuntu-os-cloud'
    ])
    # TODO(mbrukman): add flag to avoid making remote calls, thereby only
    # reading from the saved responses on disk.
    for project in vm_image_projects:
        images = gce.compute.images().list(project=project).execute(http=gce.http)
        WriteJsonToFile(images, 'raw_data/%s.json' % project)
    for project in vm_image_projects:
        with open('raw_data/%s.json' % project, 'r') as json_file:
            images = json.loads(json_file.read())
        # 'items' is absent when a project publishes no images; skip it
        # instead of raising KeyError.
        for item in images.get('items', []):
            if project not in vm_images:
                vm_images[project] = {}
            if 'images' not in vm_images[project]:
                vm_images[project]['images'] = []
            shortname = os.path.basename(urlparse.urlparse(item['selfLink']).path)
            vm_images[project]['images'].append(shortname)

    def LatestImage(images, date_pattern='-v[0-9]{8}$'):
        # Alias the newest (lexicographically last) image as '-latest'.
        vm_image_latest_dst = images[-1]
        vm_image_latest_src = re.sub(date_pattern, '-latest', vm_image_latest_dst)
        return (vm_image_latest_src, vm_image_latest_dst)

    # List comprehensions (rather than filter()) so that LatestImage can
    # index the result with [-1] on Python 3 as well, where filter()
    # returns a non-indexable iterator.
    for project in vm_images:
        images = vm_images[project].get('images', [])
        if not images:
            continue
        if 'pseudo' not in vm_images[project]:
            vm_images[project]['pseudo'] = {}
        pseudo = vm_images[project]['pseudo']
        if project == 'centos-cloud':
            for centos in ('centos-6', 'centos-7'):
                image_sublist = [image for image in images
                                 if image.startswith(centos)]
                src, dst = LatestImage(image_sublist)
                pseudo[src] = dst
        elif project == 'coreos-cloud':
            for substr in ('alpha', 'beta', 'stable'):
                image_sublist = [image for image in images if substr in image]
                src, dst = LatestImage(image_sublist,
                                       '-[0-9]*-[0-9]-[0-9]-v[0-9]{8}$')
                pseudo[src] = dst
        elif project == 'debian-cloud':
            backports = [image for image in images if 'backports' in image]
            not_backports = [image for image in images
                             if 'backports' not in image]
            for image_sublist in (backports, not_backports):
                src, dst = LatestImage(image_sublist)
                pseudo[src] = dst
        elif project == 'opensuse-cloud':
            for release in ('opensuse-13-1', 'opensuse-13-2'):
                image_sublist = [image for image in images if release in image]
                src, dst = LatestImage(image_sublist, '-v[0-9]{8}$')
                pseudo[src] = dst
        elif project == 'rhel-cloud':
            for release in ('rhel-6', 'rhel-7'):
                image_sublist = [image for image in images if release in image]
                src, dst = LatestImage(image_sublist, '-v[0-9]{8}$')
                pseudo[src] = dst
        elif project == 'suse-cloud':
            for release in ('sles-11', 'sles-12'):
                image_sublist = [image for image in images if release in image]
                src, dst = LatestImage(image_sublist, '-v[0-9]{8}$')
                pseudo[src] = dst
        elif project == 'ubuntu-os-cloud':
            for release in ('precise', 'trusty', 'utopic'):
                image_sublist = [image for image in images if release in image]
                src, dst = LatestImage(image_sublist, '-v[0-9]{8}.*$')
                pseudo[src] = dst
        else:
            src, dst = LatestImage(images)
            pseudo[src] = dst
    WriteJsonToFile(vm_images, 'vm_images.json')
def UpdateZones(gce, flags):
    """Fetch the project's zone list and cache it in zones.json."""
    response = gce.compute.zones().list(project=flags.project).execute(
        http=gce.http)
    # TODO(mbrukman): clean up the zones output.
    WriteJsonToFile(response, 'zones.json')
def main(argv):
    """Parse flags, authenticate against GCE, and refresh the cached data.

    Args:
      argv: full process argument vector (argv[0] is skipped).
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        parents=[tools.argparser])
    parser.add_argument('--project', dest='project', required=True,
                        help='Project name')
    parser.add_argument('--debug', dest='debug', default=False,
                        action='store_true',
                        help='Whether to output debug info')
    parser.add_argument('--logging', dest='logging', default='',
                        choices=('', 'info', 'warning', 'error'),
                        help='Logging level to enable')
    flags = parser.parse_args(argv[1:])
    # Map the --logging flag value to a stdlib logging level (None = off).
    LOGGING = {
        '': None,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
    }
    logging.basicConfig(level=LOGGING[flags.logging])
    gce = GceService(flags)
    UpdateVmImages(gce, flags)
    # TODO(mbrukman): enable zone list caching once we define concise format.
    # UpdateZones(gce, flags)
    # Parenthesized print is valid on both Python 2 and 3; the original
    # py2-only `print` statement is a SyntaxError under Python 3.
    print('Done.')
# Script entry point: only runs when executed directly, not on import.
if __name__ == '__main__':
    main(sys.argv)
| 34.595
| 83
| 0.665992
|
c2f172349429d189dbcfbe4d36e357fc05ec1733
| 111,560
|
py
|
Python
|
salt/modules/cmdmod.py
|
hvnsweeting/salt
|
abc9d3a0b51e6f5c4738cf71c221daf8b46fddcf
|
[
"Apache-2.0"
] | 2
|
2015-09-21T14:13:30.000Z
|
2016-02-12T11:33:46.000Z
|
salt/modules/cmdmod.py
|
hvnsweeting/salt
|
abc9d3a0b51e6f5c4738cf71c221daf8b46fddcf
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/cmdmod.py
|
hvnsweeting/salt
|
abc9d3a0b51e6f5c4738cf71c221daf8b46fddcf
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
A module for shelling out.
Keep in mind that this module is insecure, in that it can give whomever has
access to the master root execution access to all salt minions.
'''
from __future__ import absolute_import
# Import python libs
import functools
import glob
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import traceback
import fnmatch
import base64
import re
import tempfile
# Import salt libs
import salt.utils
import salt.utils.files
import salt.utils.powershell
import salt.utils.timed_subprocess
import salt.grains.extra
import salt.ext.six as six
from salt.utils import vt
from salt.exceptions import CommandExecutionError, TimedProcTimeoutError, \
SaltInvocationError
from salt.log import LOG_LEVELS
from salt.ext.six.moves import range, zip
from salt.ext.six.moves import shlex_quote as _cmd_quote
# Only available on POSIX systems, nonfatal on windows
try:
    import pwd
except ImportError:
    # pwd only exists on POSIX; Windows minions use the win_runas path below.
    pass
if salt.utils.is_windows():
    from salt.utils.win_runas import runas as win_runas
    HAS_WIN_RUNAS = True
else:
    HAS_WIN_RUNAS = False
# Allow this execution module on proxy minions of any type.
__proxyenabled__ = ['*']
# Define the module's virtual name
__virtualname__ = 'cmd'
# Set up logging
log = logging.getLogger(__name__)
# Platform default shell, as detected by the extra grains module.
DEFAULT_SHELL = salt.grains.extra.shell()['shell']
def __virtual__():
    '''
    Load this execution module under the virtual name ``cmd``.

    Overwriting the stdlib ``cmd`` python module directly would make
    debugging modules with pdb harder, hence the virtual-name indirection.
    '''
    return __virtualname__
def _check_cb(cb_):
'''
If the callback is None or is not callable, return a lambda that returns
the value passed.
'''
if cb_ is not None:
if hasattr(cb_, '__call__'):
return cb_
else:
log.error('log_callback is not callable, ignoring')
return lambda x: x
def _python_shell_default(python_shell, __pub_jid):
'''
Set python_shell default based on remote execution and __opts__['cmd_safe']
'''
try:
# Default to python_shell=True when run directly from remote execution
# system. Cross-module calls won't have a jid.
if __pub_jid and python_shell is None:
return True
elif __opts__.get('cmd_safe', True) is False and python_shell is None:
# Override-switch for python_shell
return True
except NameError:
pass
return python_shell
def _chroot_pids(chroot):
pids = []
for root in glob.glob('/proc/[0-9]*/root'):
try:
link = os.path.realpath(root)
if link.startswith(chroot):
pids.append(int(os.path.basename(
os.path.dirname(root)
)))
except OSError:
pass
return pids
def _render_cmd(cmd, cwd, template, saltenv='base', pillarenv=None, pillar_override=None):
    '''
    Optionally run ``cmd`` and ``cwd`` through a templating engine.

    Returns the ``(cmd, cwd)`` pair untouched when no template is
    requested; raises CommandExecutionError for an unknown engine or a
    failed render.
    '''
    if not template:
        return (cmd, cwd)
    # render the path as a template using path_template_engine as the engine
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        raise CommandExecutionError(
            'Attempted to render file paths with unavailable engine '
            '{0}'.format(template)
        )
    # Build the context handed to the template engine.
    kwargs = {
        'salt': __salt__,
        'grains': __grains__,
        'opts': __opts__,
        'saltenv': saltenv,
    }
    if pillarenv is not None or pillar_override is not None:
        kwargs['pillar'] = _gather_pillar(
            pillarenv or __opts__['pillarenv'], pillar_override)
    else:
        kwargs['pillar'] = __pillar__

    def _render(contents):
        # The engines operate on files, so spool the string to a temp
        # path, render it, then remove the file.
        tmp_path_fn = salt.utils.files.mkstemp()
        with salt.utils.fopen(tmp_path_fn, 'w+') as fp_:
            fp_.write(contents)
        data = salt.utils.templates.TEMPLATE_REGISTRY[template](
            tmp_path_fn,
            to_str=True,
            **kwargs
        )
        salt.utils.safe_rm(tmp_path_fn)
        if not data['result']:
            # Failed to render the template
            raise CommandExecutionError(
                'Failed to execute cmd with error: {0}'.format(
                    data['data']
                )
            )
        return data['data']

    return (_render(cmd), _render(cwd))
def _check_loglevel(level='info', quiet=False):
    '''
    Map a loglevel name to its numeric code for logging.Logger.log().

    Returns None when output should be suppressed entirely (``quiet`` set
    or level 'quiet'); unknown levels fall back to INFO with an error log.
    '''
    def _bad_level(level):
        log.error(
            'Invalid output_loglevel \'{0}\'. Valid levels are: {1}. Falling '
            'back to \'info\'.'
            .format(level, ', '.join(sorted(LOG_LEVELS, reverse=True)))
        )
        return LOG_LEVELS['info']

    if salt.utils.is_true(quiet) or str(level).lower() == 'quiet':
        return None
    try:
        level = level.lower()
    except AttributeError:
        # Non-string levels (e.g. ints) are not accepted.
        return _bad_level(level)
    if level not in LOG_LEVELS:
        return _bad_level(level)
    return LOG_LEVELS[level]
def _parse_env(env):
if not env:
env = {}
if isinstance(env, list):
env = salt.utils.repack_dictlist(env)
if not isinstance(env, dict):
env = {}
return env
def _gather_pillar(pillarenv, pillar_override):
    '''
    Compile fresh pillar data for a run, layering any dict override on top.
    '''
    compiled = salt.pillar.get_pillar(
        __opts__,
        __grains__,
        __opts__['id'],
        __opts__['environment'],
        pillar=pillar_override,
        pillarenv=pillarenv
    ).compile_pillar()
    if pillar_override and isinstance(pillar_override, dict):
        compiled.update(pillar_override)
    return compiled
def _check_avail(cmd):
    '''
    Verify ``cmd`` against the configured blacklist/whitelist globs.

    Returns True only when the command matches no blacklist glob and,
    if a whitelist is configured, matches at least one whitelist glob.
    '''
    if isinstance(cmd, list):
        cmd = ' '.join([str(x) if not isinstance(x, six.string_types) else x
                        for x in cmd])
    bret = True
    wret = False
    if __salt__['config.get']('cmd_blacklist_glob'):
        # Any blacklist match rejects the command outright.
        for pattern in __salt__['config.get']('cmd_blacklist_glob', []):
            if fnmatch.fnmatch(cmd, pattern):
                bret = False
    if __salt__['config.get']('cmd_whitelist_glob', []):
        # With a whitelist configured, at least one glob must match.
        wret = any(
            fnmatch.fnmatch(cmd, pattern)
            for pattern in __salt__['config.get']('cmd_whitelist_glob', []))
    else:
        # If no whitelist set then alls good!
        wret = True
    return bret and wret
def _run(cmd,
         cwd=None,
         stdin=None,
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         output_loglevel='debug',
         log_callback=None,
         runas=None,
         shell=DEFAULT_SHELL,
         python_shell=False,
         env=None,
         clean_env=False,
         rstrip=True,
         template=None,
         umask=None,
         timeout=None,
         with_communicate=True,
         reset_system_locale=True,
         ignore_retcode=False,
         saltenv='base',
         pillarenv=None,
         pillar_override=None,
         use_vt=False,
         password=None,
         bg=False,
         encoded_cmd=False,
         **kwargs):
    '''
    Do the DRY thing and only call subprocess.Popen() once.

    Shared workhorse behind the public cmd.* wrappers. Resolves the cwd,
    validates the shell, wraps powershell invocations, templates the
    command, applies the blacklist/whitelist, primes the runas user's
    environment, resets locales, and finally executes either through
    TimedProc or through salt's VT when ``use_vt`` is set.

    Returns a dict with 'pid', 'retcode', 'stdout' and 'stderr' keys
    (or whatever win_runas returns on Windows with ``runas``).
    '''
    # Legacy spelling: a 'pillar' kwarg acts as pillar_override.
    if 'pillar' in kwargs and not pillar_override:
        pillar_override = kwargs['pillar']
    if _is_valid_shell(shell) is False:
        log.warning(
            'Attempt to run a shell command with what may be an invalid shell! '
            'Check to ensure that the shell <{0}> is valid for this user.'
            .format(shell))
    log_callback = _check_cb(log_callback)
    # Set the default working directory to the home directory of the user
    # salt-minion is running as. Defaults to home directory of user under which
    # the minion is running.
    if not cwd:
        cwd = os.path.expanduser('~{0}'.format('' if not runas else runas))
        # make sure we can access the cwd
        # when run from sudo or another environment where the euid is
        # changed ~ will expand to the home of the original uid and
        # the euid might not have access to it. See issue #1844
        if not os.access(cwd, os.R_OK):
            cwd = '/'
            if salt.utils.is_windows():
                # NOTE(review): os.tempnam() is a py2-only API; [:3] keeps
                # the drive prefix (e.g. 'c:\\') — confirm on py3 minions.
                cwd = os.tempnam()[:3]
    else:
        # Handle edge cases where numeric/other input is entered, and would be
        # yaml-ified into non-string types
        cwd = str(cwd)
    if not salt.utils.is_windows():
        if not os.path.isfile(shell) or not os.access(shell, os.X_OK):
            msg = 'The shell {0} is not available'.format(shell)
            raise CommandExecutionError(msg)
    if salt.utils.is_windows() and use_vt:  # Memoization so not much overhead
        raise CommandExecutionError('VT not available on windows')
    if shell.lower().strip() == 'powershell':
        # Strip whitespace
        if isinstance(cmd, six.string_types):
            cmd = cmd.strip()
        # If we were called by script(), then fakeout the Windows
        # shell to run a Powershell script.
        # Else just run a Powershell command.
        stack = traceback.extract_stack(limit=2)
        # extract_stack() returns a list of tuples.
        # The last item in the list [-1] is the current method.
        # The third item[2] in each tuple is the name of that method.
        if stack[-2][2] == 'script':
            cmd = 'Powershell -NonInteractive -NoProfile -ExecutionPolicy Bypass -File ' + cmd
        elif encoded_cmd:
            cmd = 'Powershell -NonInteractive -EncodedCommand {0}'.format(cmd)
        else:
            cmd = 'Powershell -NonInteractive -NoProfile "{0}"'.format(cmd.replace('"', '\\"'))
    # munge the cmd and cwd through the template
    (cmd, cwd) = _render_cmd(cmd, cwd, template, saltenv, pillarenv, pillar_override)
    ret = {}
    # If the pub jid is here then this is a remote ex or salt call command and needs to be
    # checked if blacklisted
    if '__pub_jid' in kwargs:
        if not _check_avail(cmd):
            msg = 'This shell command is not permitted: "{0}"'.format(cmd)
            raise CommandExecutionError(msg)
    env = _parse_env(env)
    # Valueless env vars become empty strings rather than None.
    for bad_env_key in (x for x, y in six.iteritems(env) if y is None):
        log.error('Environment variable \'{0}\' passed without a value. '
                  'Setting value to an empty string'.format(bad_env_key))
        env[bad_env_key] = ''

    def _get_stripped(cmd):
        # Return stripped command string copies to improve logging.
        if isinstance(cmd, list):
            return [x.strip() if isinstance(x, str) else x for x in cmd]
        elif isinstance(cmd, str):
            return cmd.strip()
        else:
            return cmd

    if _check_loglevel(output_loglevel) is not None:
        # Always log the shell commands at INFO unless quiet logging is
        # requested. The command output is what will be controlled by the
        # 'loglevel' parameter.
        msg = (
            'Executing command {0}{1}{0} {2}in directory \'{3}\'{4}'.format(
                '\'' if not isinstance(cmd, list) else '',
                _get_stripped(cmd),
                'as user \'{0}\' '.format(runas) if runas else '',
                cwd,
                '. Executing command in the background, no output will be '
                'logged.' if bg else ''
            )
        )
        log.info(log_callback(msg))
    # Windows runas: delegate entirely to win_runas and return its result.
    if runas and salt.utils.is_windows():
        if not password:
            msg = 'password is a required argument for runas on Windows'
            raise CommandExecutionError(msg)
        if not HAS_WIN_RUNAS:
            msg = 'missing salt/utils/win_runas.py'
            raise CommandExecutionError(msg)
        if not isinstance(cmd, list):
            cmd = salt.utils.shlex_split(cmd, posix=False)
        cmd = ' '.join(cmd)
        return win_runas(cmd, runas, password, cwd)
    if runas:
        # Save the original command before munging it
        try:
            pwd.getpwnam(runas)
        except KeyError:
            raise CommandExecutionError(
                'User \'{0}\' is not available'.format(runas)
            )
        try:
            # Getting the environment for the runas user
            # There must be a better way to do this.
            py_code = (
                'import sys, os, itertools; '
                'sys.stdout.write(\"\\0\".join(itertools.chain(*os.environ.items())))'
            )
            if __grains__['os'] in ['MacOS', 'Darwin']:
                env_cmd = ('sudo', '-i', '-u', runas, '--',
                           sys.executable)
            elif __grains__['os'] in ['FreeBSD']:
                env_cmd = ('su', '-', runas, '-c',
                           "{0} -c {1}".format(shell, sys.executable))
            elif __grains__['os_family'] in ['Solaris']:
                env_cmd = ('su', '-', runas, '-c', sys.executable)
            elif __grains__['os_family'] in ['AIX']:
                env_cmd = ('su', runas, '-c', sys.executable)
            else:
                env_cmd = ('su', '-s', shell, '-', runas, '-c', sys.executable)
            env_encoded = subprocess.Popen(
                env_cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE
            ).communicate(py_code.encode(__salt_system_encoding__))[0]
            if six.PY2:
                import itertools
                env_runas = dict(itertools.izip(*[iter(env_encoded.split(b'\0'))]*2))
            elif six.PY3:
                if isinstance(env_encoded, str):
                    env_encoded = env_encoded.encode(__salt_system_encoding__)
                env_runas = dict(list(zip(*[iter(env_encoded.split(b'\0'))]*2)))
            # Explicit env entries win over the runas user's environment.
            env_runas.update(env)
            env = env_runas
            # Encode unicode kwargs to filesystem encoding to avoid a
            # UnicodeEncodeError when the subprocess is invoked.
            fse = sys.getfilesystemencoding()
            for key, val in six.iteritems(env):
                if isinstance(val, six.text_type):
                    env[key] = val.encode(fse)
        except ValueError:
            raise CommandExecutionError(
                'Environment could not be retrieved for User \'{0}\''.format(
                    runas
                )
            )
    if reset_system_locale is True:
        if not salt.utils.is_windows():
            # Default to C!
            # Salt only knows how to parse English words
            # Don't override if the user has passed LC_ALL
            env.setdefault('LC_CTYPE', 'C')
            env.setdefault('LC_NUMERIC', 'C')
            env.setdefault('LC_TIME', 'C')
            env.setdefault('LC_COLLATE', 'C')
            env.setdefault('LC_MONETARY', 'C')
            env.setdefault('LC_MESSAGES', 'C')
            env.setdefault('LC_PAPER', 'C')
            env.setdefault('LC_NAME', 'C')
            env.setdefault('LC_ADDRESS', 'C')
            env.setdefault('LC_TELEPHONE', 'C')
            env.setdefault('LC_MEASUREMENT', 'C')
            env.setdefault('LC_IDENTIFICATION', 'C')
        else:
            # On Windows set the codepage to US English.
            if python_shell:
                cmd = 'chcp 437 > nul & ' + cmd
    if clean_env:
        run_env = env
    else:
        # Inherit the minion's environment, overlaid with the explicit env.
        run_env = os.environ.copy()
        run_env.update(env)
    if python_shell is None:
        python_shell = False
    # Assemble the keyword arguments handed to TimedProc below; note this
    # rebinds the function-level `kwargs` name.
    kwargs = {'cwd': cwd,
              'shell': python_shell,
              'env': run_env,
              'stdin': str(stdin) if stdin is not None else stdin,
              'stdout': stdout,
              'stderr': stderr,
              'with_communicate': with_communicate,
              'timeout': timeout,
              'bg': bg,
              }
    if umask is not None:
        _umask = str(umask).lstrip('0')
        if _umask == '':
            msg = 'Zero umask is not allowed.'
            raise CommandExecutionError(msg)
        try:
            _umask = int(_umask, 8)
        except ValueError:
            msg = 'Invalid umask: \'{0}\''.format(umask)
            raise CommandExecutionError(msg)
    else:
        _umask = None
    if runas or umask:
        # Drop privileges / apply the umask in the child before exec.
        kwargs['preexec_fn'] = functools.partial(
            salt.utils.chugid_and_umask,
            runas,
            _umask)
    if not salt.utils.is_windows():
        # close_fds is not supported on Windows platforms if you redirect
        # stdin/stdout/stderr
        if kwargs['shell'] is True:
            kwargs['executable'] = shell
        kwargs['close_fds'] = True
    if not os.path.isabs(cwd) or not os.path.isdir(cwd):
        raise CommandExecutionError(
            'Specified cwd \'{0}\' either not absolute or does not exist'
            .format(cwd)
        )
    if python_shell is not True and not isinstance(cmd, list):
        # Without a shell, Popen needs an argv list rather than a string.
        posix = True
        if salt.utils.is_windows():
            posix = False
        cmd = salt.utils.shlex_split(cmd, posix=posix)
    if not use_vt:
        # This is where the magic happens
        try:
            proc = salt.utils.timed_subprocess.TimedProc(cmd, **kwargs)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                'Unable to run command \'{0}\' with the context \'{1}\', '
                'reason: {2}'.format(cmd, kwargs, exc)
            )
        try:
            proc.run()
        except TimedProcTimeoutError as exc:
            ret['stdout'] = str(exc)
            ret['stderr'] = ''
            ret['retcode'] = None
            ret['pid'] = proc.process.pid
            # ok return code for timeouts?
            ret['retcode'] = 1
            return ret
        out, err = proc.stdout, proc.stderr
        if err is None:
            # Will happen if redirect_stderr is True, since stderr was sent to
            # stdout.
            err = ''
        if rstrip:
            if out is not None:
                out = salt.utils.to_str(out).rstrip()
            if err is not None:
                err = salt.utils.to_str(err).rstrip()
        ret['pid'] = proc.process.pid
        ret['retcode'] = proc.process.returncode
        ret['stdout'] = out
        ret['stderr'] = err
    else:
        # VT path: stream output interactively through salt's terminal
        # emulation, polling for data until the child exits or times out.
        to = ''
        if timeout:
            to = ' (timeout: {0}s)'.format(timeout)
        if _check_loglevel(output_loglevel) is not None:
            msg = 'Running {0} in VT{1}'.format(cmd, to)
            log.debug(log_callback(msg))
        stdout, stderr = '', ''
        now = time.time()
        if timeout:
            will_timeout = now + timeout
        else:
            will_timeout = -1
        try:
            proc = vt.Terminal(cmd,
                               shell=True,
                               log_stdout=True,
                               log_stderr=True,
                               cwd=cwd,
                               preexec_fn=kwargs.get('preexec_fn', None),
                               env=run_env,
                               log_stdin_level=output_loglevel,
                               log_stdout_level=output_loglevel,
                               log_stderr_level=output_loglevel,
                               stream_stdout=True,
                               stream_stderr=True)
            ret['pid'] = proc.pid
            while proc.has_unread_data:
                try:
                    try:
                        time.sleep(0.5)
                        try:
                            cstdout, cstderr = proc.recv()
                        except IOError:
                            cstdout, cstderr = '', ''
                        if cstdout:
                            stdout += cstdout
                        else:
                            cstdout = ''
                        if cstderr:
                            stderr += cstderr
                        else:
                            cstderr = ''
                        if timeout and (time.time() > will_timeout):
                            ret['stderr'] = (
                                'SALT: Timeout after {0}s\n{1}').format(
                                    timeout, stderr)
                            ret['retcode'] = None
                            break
                    except KeyboardInterrupt:
                        ret['stderr'] = 'SALT: User break\n{0}'.format(stderr)
                        ret['retcode'] = 1
                        break
                except vt.TerminalException as exc:
                    log.error(
                        'VT: {0}'.format(exc),
                        exc_info_on_loglevel=logging.DEBUG)
                    ret = {'retcode': 1, 'pid': '2'}
                    break
            # only set stdout on success as we already mangled in other
            # cases
            ret['stdout'] = stdout
            if not proc.isalive():
                # Process terminated, i.e., not canceled by the user or by
                # the timeout
                ret['stderr'] = stderr
                ret['retcode'] = proc.exitstatus
            ret['pid'] = proc.pid
        finally:
            proc.close(terminate=True, kill=True)
    try:
        if ignore_retcode:
            __context__['retcode'] = 0
        else:
            __context__['retcode'] = ret['retcode']
    except NameError:
        # Ignore the context error during grain generation
        pass
    return ret
def _run_quiet(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=False,
               env=None,
               template=None,
               umask=None,
               timeout=None,
               reset_system_locale=True,
               saltenv='base',
               pillarenv=None,
               pillar_override=None):
    '''
    Run ``cmd`` with all output logging suppressed and return its stdout.

    Used for minion startup, before the full logging machinery is in
    place; stderr is folded into stdout.
    '''
    return _run(cmd,
                cwd=cwd,
                stdin=stdin,
                runas=runas,
                shell=shell,
                python_shell=python_shell,
                env=env,
                template=template,
                umask=umask,
                timeout=timeout,
                reset_system_locale=reset_system_locale,
                saltenv=saltenv,
                pillarenv=pillarenv,
                pillar_override=pillar_override,
                stderr=subprocess.STDOUT,
                output_loglevel='quiet',
                log_callback=None)['stdout']
def _run_all_quiet(cmd,
                   cwd=None,
                   stdin=None,
                   runas=None,
                   shell=DEFAULT_SHELL,
                   python_shell=False,
                   env=None,
                   template=None,
                   umask=None,
                   timeout=None,
                   reset_system_locale=True,
                   saltenv='base',
                   pillarenv=None,
                   pillar_override=None,
                   output_loglevel=None):
    '''
    Run ``cmd`` quietly for minion startup and return the full result dict.

    The ``output_loglevel`` argument is accepted but ignored. It exists so
    cmd.run_all can be aliased directly to this helper in certain
    chicken-and-egg situations where modules need to work both before and
    after the __salt__ dictionary is populated (cf dracr.py).
    '''
    return _run(cmd,
                cwd=cwd,
                stdin=stdin,
                runas=runas,
                shell=shell,
                python_shell=python_shell,
                env=env,
                template=template,
                umask=umask,
                timeout=timeout,
                reset_system_locale=reset_system_locale,
                saltenv=saltenv,
                pillarenv=pillarenv,
                pillar_override=pillar_override,
                output_loglevel='quiet',
                log_callback=None)
def run(cmd,
        cwd=None,
        stdin=None,
        runas=None,
        shell=DEFAULT_SHELL,
        python_shell=None,
        env=None,
        clean_env=False,
        template=None,
        rstrip=True,
        umask=None,
        output_loglevel='debug',
        log_callback=None,
        timeout=None,
        reset_system_locale=True,
        ignore_retcode=False,
        saltenv='base',
        use_vt=False,
        bg=False,
        password=None,
        encoded_cmd=False,
        **kwargs):
    r'''
    Execute the passed command and return the output as a string.

    Note that ``env`` represents the environment variables for the command,
    and should be formatted as a dict, or a YAML string which resolves to a
    dict. Values are not evaluated, so ``$PATH`` stays a literal; use Jinja
    to splice in the current value.

    :param str cmd: The command to run. ex: ``ls -lart /home``
    :param str cwd: Working directory to execute in; defaults to the home
        directory of the user the minion runs as.
    :param str stdin: Text to feed the command on standard input; useful
        when sensitive information must not appear on the command line.
    :param str runas: User to run the command as. On a Windows minion you
        must also pass a password.
    :param str password: Windows only. Required when specifying ``runas``;
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

    :param str shell: Shell to execute under. Defaults to the system
        default shell.
    :param bool python_shell: If False, let python handle the positional
        arguments. Set to True to use shell features, such as pipes or
        redirection.
    :param bool bg: If True, run the command in the background and do not
        await or deliver its results.
    :param list env: Environment variables to set prior to execution.
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument.
    :param str template: Templating engine (jinja, mako, wempy) used to
        render the command before execution.
    :param bool rstrip: Strip all whitespace off the end of output before
        it is returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Loglevel at which the command's output is
        logged; the command itself is still logged unless ``quiet``.
    :param int timeout: Seconds to wait for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command
        output more interactively. This is experimental.
    :param bool encoded_cmd: Specify if the supplied command is encoded.
        Only applies to shell 'powershell'.

    .. warning::
        This function does not process commands through a shell unless
        ``python_shell=True``, in which case _any_ input — including
        potentially malicious commands such as 'good_command;rm -rf /' —
        reaches the shell. Sanitize your input before enabling it.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.run "ls -l | awk '/foo/{print \\$2}'"
        salt '*' cmd.run template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
        salt '*' cmd.run "Get-ChildItem C:\\ " shell='powershell'
        salt '*' cmd.run "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
        salt '*' cmd.run cmd='sed -e s/=/:/g'
    '''
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    result = _run(cmd,
                  cwd=cwd,
                  stdin=stdin,
                  stderr=subprocess.STDOUT,
                  runas=runas,
                  shell=shell,
                  python_shell=python_shell,
                  env=env,
                  clean_env=clean_env,
                  template=template,
                  rstrip=rstrip,
                  umask=umask,
                  output_loglevel=output_loglevel,
                  log_callback=log_callback,
                  timeout=timeout,
                  reset_system_locale=reset_system_locale,
                  ignore_retcode=ignore_retcode,
                  saltenv=saltenv,
                  use_vt=use_vt,
                  bg=bg,
                  password=password,
                  encoded_cmd=encoded_cmd,
                  **kwargs)
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is None:
        # Quiet mode: no output logging at all.
        return result['stdout']
    if not ignore_retcode and result['retcode'] != 0:
        # Failures are surfaced at ERROR or above.
        if lvl < LOG_LEVELS['error']:
            lvl = LOG_LEVELS['error']
        log.error(log_callback(
            'Command \'{0}\' failed with return code: {1}'.format(
                cmd,
                result['retcode']
            )
        ))
    log.log(lvl, 'output: {0}'.format(log_callback(result['stdout'])))
    return result['stdout']
def shell(cmd,
          cwd=None,
          stdin=None,
          runas=None,
          shell=DEFAULT_SHELL,
          env=None,
          clean_env=False,
          template=None,
          rstrip=True,
          umask=None,
          output_loglevel='debug',
          log_callback=None,
          quiet=False,
          timeout=None,
          reset_system_locale=True,
          ignore_retcode=False,
          saltenv='base',
          use_vt=False,
          bg=False,
          password=None,
          **kwargs):
    '''
    Execute the passed command through the shell and return the output as a
    string. Equivalent to cmd.run with ``python_shell`` defaulting to True.

    .. versionadded:: 2015.5.0

    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: Working directory to execute in; defaults to /root.
    :param str stdin: Text to feed the command on standard input; useful
        when sensitive information must not appear on the command line.
    :param str runas: User to run the command as. On a Windows minion you
        must also pass a password.
    :param str password: Windows only. Required when specifying ``runas``;
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.0

    :param int shell: Shell to execute under. Defaults to the system
        default shell.
    :param bool bg: If True, run the command in the background and do not
        await or deliver its results.
    :param list env: Environment variables to set prior to execution, as a
        dict or a YAML string which resolves to a dict. Values are not
        evaluated, so $PATH stays a literal; use Jinja to splice in the
        current value.
    :param bool clean_env: Attempt to clean out all other shell environment
        variables and set only those provided in the 'env' argument.
    :param str template: Templating engine (jinja, mako, wempy) used to
        render the command before execution.
    :param bool rstrip: Strip all whitespace off the end of output before
        it is returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Loglevel at which the command's output is
        logged; the command itself is still logged unless ``quiet``.
    :param int timeout: Seconds to wait for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command
        output more interactively. This is experimental.

    .. warning::
        This passes the cmd argument directly to the shell without any
        further processing! Be absolutely sure that you have properly
        sanitized the command passed to this function and do not use
        untrusted inputs.

    CLI Example:

    .. code-block:: bash

        salt '*' cmd.shell "ls -l | awk '/foo/{print \\$2}'"
        salt '*' cmd.shell template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
        salt '*' cmd.shell "Get-ChildItem C:\\ " shell='powershell'
        salt '*' cmd.shell "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
        salt '*' cmd.shell cmd='sed -e s/=/:/g'
    '''
    # Shell semantics by default, but let callers override explicitly.
    python_shell = kwargs.pop('python_shell', True)
    return run(cmd,
               cwd=cwd,
               stdin=stdin,
               runas=runas,
               shell=shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               quiet=quiet,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               python_shell=python_shell,
               bg=bg,
               password=password,
               **kwargs)
def run_stdout(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=None,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='debug',
               log_callback=None,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               password=None,
               **kwargs):
    '''
    Execute a command, and only return the standard out
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
    defaults to /root
    :param str stdin: A string of standard input can be specified for the
    command to be run using the ``stdin`` parameter. This can be useful in
    cases where sensitive information must be read from standard input.
    :param str runas: User to run script as. If running on a Windows minion
    you must also pass a password
    :param str password: Windows only. Required when specifying ``runas``.
    This parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
    execution. May also be given as a dict, or a YAML string which resolves
    to a dict.
    :param bool clean_env: Attempt to clean out all other shell environment
    variables and set only those provided in the 'env' argument to this
    function.
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it
    is returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be
    logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for this
    value.
    :param int timeout: A timeout in seconds for the executed process to
    return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
    more interactively to the console and the logs. This is experimental.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_stdout "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution. For example:
    .. code-block:: bash
        salt '*' cmd.run_stdout template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    A string of standard input can be specified for the command to be run
    using the ``stdin`` parameter:
    .. code-block:: bash
        salt '*' cmd.run_stdout "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Resolve the per-job python_shell default the same way the other
    # cmd.run_* entry points do.
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               password=password,
               **kwargs)
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is None:
        # Logging is fully suppressed (e.g. output_loglevel='quiet').
        return ret['stdout']
    if not ignore_retcode and ret['retcode'] != 0:
        # A failed command is always logged at ERROR or above.
        lvl = max(lvl, LOG_LEVELS['error'])
        log.error(log_callback(
            'Command \'{0}\' failed with return code: {1}'.format(
                cmd,
                ret['retcode']
            )
        ))
    if ret['stdout']:
        log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
    if ret['stderr']:
        log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
    if ret['retcode']:
        log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
    return ret['stdout']
def run_stderr(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=None,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='debug',
               log_callback=None,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               password=None,
               **kwargs):
    '''
    Execute a command and only return the standard error
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
    defaults to /root
    :param str stdin: A string of standard input can be specified for the
    command to be run using the ``stdin`` parameter. This can be useful in
    cases where sensitive information must be read from standard input.
    :param str runas: User to run script as. If running on a Windows minion
    you must also pass a password
    :param str password: Windows only. Required when specifying ``runas``.
    This parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
    execution. May also be given as a dict, or a YAML string which resolves
    to a dict.
    :param bool clean_env: Attempt to clean out all other shell environment
    variables and set only those provided in the 'env' argument to this
    function.
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it
    is returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be
    logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for this
    value.
    :param int timeout: A timeout in seconds for the executed process to
    return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
    more interactively to the console and the logs. This is experimental.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_stderr "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution. For example:
    .. code-block:: bash
        salt '*' cmd.run_stderr template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    A string of standard input can be specified for the command to be run
    using the ``stdin`` parameter:
    .. code-block:: bash
        salt '*' cmd.run_stderr "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Resolve the per-job python_shell default the same way the other
    # cmd.run_* entry points do.
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               use_vt=use_vt,
               saltenv=saltenv,
               password=password,
               **kwargs)
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is None:
        # Logging is fully suppressed (e.g. output_loglevel='quiet').
        return ret['stderr']
    if not ignore_retcode and ret['retcode'] != 0:
        # A failed command is always logged at ERROR or above.
        lvl = max(lvl, LOG_LEVELS['error'])
        log.error(log_callback(
            'Command \'{0}\' failed with return code: {1}'.format(
                cmd,
                ret['retcode']
            )
        ))
    if ret['stdout']:
        log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
    if ret['stderr']:
        log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
    if ret['retcode']:
        log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
    return ret['stderr']
def run_all(cmd,
            cwd=None,
            stdin=None,
            runas=None,
            shell=DEFAULT_SHELL,
            python_shell=None,
            env=None,
            clean_env=False,
            template=None,
            rstrip=True,
            umask=None,
            output_loglevel='debug',
            log_callback=None,
            timeout=None,
            reset_system_locale=True,
            ignore_retcode=False,
            saltenv='base',
            use_vt=False,
            redirect_stderr=False,
            password=None,
            **kwargs):
    '''
    Execute the passed command and return a dict of return data
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
    defaults to /root
    :param str stdin: A string of standard input can be specified for the
    command to be run using the ``stdin`` parameter. This can be useful in
    cases where sensitive information must be read from standard input.
    :param str runas: User to run script as. If running on a Windows minion
    you must also pass a password
    :param str password: Windows only. Required when specifying ``runas``.
    This parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
    execution. May also be given as a dict, or a YAML string which resolves
    to a dict.
    :param bool clean_env: Attempt to clean out all other shell environment
    variables and set only those provided in the 'env' argument to this
    function.
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it
    is returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be
    logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for this
    value.
    :param int timeout: A timeout in seconds for the executed process to
    return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
    more interactively to the console and the logs. This is experimental.
    redirect_stderr : False
        If set to ``True``, then stderr will be redirected to stdout. This is
        helpful for cases where obtaining both the retcode and output is
        desired, but it is not desired to have the output separated into both
        stdout and stderr.
        .. versionadded:: 2015.8.2
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_all "ls -l | awk '/foo/{print \\$2}'"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution. For example:
    .. code-block:: bash
        salt '*' cmd.run_all template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    A string of standard input can be specified for the command to be run
    using the ``stdin`` parameter:
    .. code-block:: bash
        salt '*' cmd.run_all "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # Resolve the per-job python_shell default the same way the other
    # cmd.run_* entry points do.
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    # Fold stderr into stdout when the caller asked for combined output.
    if redirect_stderr:
        stderr = subprocess.STDOUT
    else:
        stderr = subprocess.PIPE
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               stderr=stderr,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               rstrip=rstrip,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               password=password,
               **kwargs)
    log_callback = _check_cb(log_callback)
    lvl = _check_loglevel(output_loglevel)
    if lvl is None:
        # Logging is fully suppressed (e.g. output_loglevel='quiet').
        return ret
    if not ignore_retcode and ret['retcode'] != 0:
        # A failed command is always logged at ERROR or above.
        lvl = max(lvl, LOG_LEVELS['error'])
        log.error(log_callback(
            'Command \'{0}\' failed with return code: {1}'.format(
                cmd,
                ret['retcode']
            )
        ))
    if ret['stdout']:
        log.log(lvl, 'stdout: {0}'.format(log_callback(ret['stdout'])))
    if ret['stderr']:
        log.log(lvl, 'stderr: {0}'.format(log_callback(ret['stderr'])))
    if ret['retcode']:
        log.log(lvl, 'retcode: {0}'.format(ret['retcode']))
    return ret
def retcode(cmd,
            cwd=None,
            stdin=None,
            runas=None,
            shell=DEFAULT_SHELL,
            python_shell=None,
            env=None,
            clean_env=False,
            template=None,
            umask=None,
            output_loglevel='debug',
            log_callback=None,
            timeout=None,
            reset_system_locale=True,
            ignore_retcode=False,
            saltenv='base',
            use_vt=False,
            password=None,
            **kwargs):
    '''
    Execute a shell command and return the command's return code.
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
    defaults to /root
    :param str stdin: A string of standard input can be specified for the
    command to be run using the ``stdin`` parameter. This can be useful in cases
    where sensitive information must be read from standard input.:
    :param str runas: User to run script as. If running on a Windows minion you
    must also pass a password
    :param str password: Windows only. Required when specifying ``runas``. This
    parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
    execution.
    Example:
    .. code-block:: yaml
        salt://scripts/foo.sh:
          cmd.script:
            - env:
              - BATCH: 'yes'
    .. warning::
        The above illustrates a common PyYAML pitfall, that **yes**,
        **no**, **on**, **off**, **true**, and **false** are all loaded as
        boolean ``True`` and ``False`` values, and must be enclosed in
        quotes to be used as strings. More info on this (and other) PyYAML
        idiosyncrasies can be found :doc:`here
        </topics/troubleshooting/yaml_idiosyncrasies>`.
    Variables as values are not evaluated. So $PATH in the following
    example is a literal '$PATH':
    .. code-block:: yaml
        salt://scripts/bar.sh:
          cmd.script:
            - env: "PATH=/some/path:$PATH"
    One can still use the existing $PATH by using a bit of Jinja:
    .. code-block:: yaml
        {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
        mycommand:
          cmd.run:
            - name: ls -l /
            - env:
              - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
    variables and set only those provided in the 'env' argument to this
    function.
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be logged
    (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param int timeout: A timeout in seconds for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
    more interactively to the console and the logs. This is experimental.
    .. note::
        ``env`` represents the environment variables for the command, and
        should be formatted as a dict, or a YAML string which resolves to a dict.
    :rtype: int
    :rtype: None
    :returns: Return Code as an int or None if there was an exception.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.retcode "file /bin/bash"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:
    .. code-block:: bash
        salt '*' cmd.retcode template=jinja "file {{grains.pythonpath[0]}}/python"
    A string of standard input can be specified for the command to be run using
    the ``stdin`` parameter. This can be useful in cases where sensitive
    information must be read from standard input.:
    .. code-block:: bash
        salt '*' cmd.retcode "grep f" stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # NOTE(review): unlike run_stdout()/run_stderr()/run_all(), python_shell
    # is passed straight through without being normalized via
    # _python_shell_default() -- confirm this is intentional.
    # stderr is folded into stdout (subprocess.STDOUT) so the combined
    # output can be logged below as a single stream.
    ret = _run(cmd,
               runas=runas,
               cwd=cwd,
               stdin=stdin,
               stderr=subprocess.STDOUT,
               shell=shell,
               python_shell=python_shell,
               env=env,
               clean_env=clean_env,
               template=template,
               umask=umask,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               use_vt=use_vt,
               password=password,
               **kwargs)
    log_callback = _check_cb(log_callback)
    # lvl is None when logging is fully suppressed (output_loglevel='quiet').
    lvl = _check_loglevel(output_loglevel)
    if lvl is not None:
        if not ignore_retcode and ret['retcode'] != 0:
            # Escalate failed commands to at least ERROR.
            if lvl < LOG_LEVELS['error']:
                lvl = LOG_LEVELS['error']
            msg = (
                'Command \'{0}\' failed with return code: {1}'.format(
                    cmd,
                    ret['retcode']
                )
            )
            log.error(log_callback(msg))
        # stderr was redirected into stdout above, so one log line suffices.
        log.log(lvl, 'output: {0}'.format(log_callback(ret['stdout'])))
    return ret['retcode']
def _retcode_quiet(cmd,
                   cwd=None,
                   stdin=None,
                   runas=None,
                   shell=DEFAULT_SHELL,
                   python_shell=False,
                   env=None,
                   clean_env=False,
                   template=None,
                   umask=None,
                   output_loglevel='quiet',
                   log_callback=None,
                   timeout=None,
                   reset_system_locale=True,
                   ignore_retcode=False,
                   saltenv='base',
                   use_vt=False,
                   password=None,
                   **kwargs):
    '''
    Helper for running commands quietly for minion startup.
    Returns same as retcode
    '''
    # Thin forwarder: the "quiet" behavior comes entirely from this
    # function's defaults (output_loglevel='quiet', python_shell=False);
    # everything else is passed through to retcode() unchanged.
    return retcode(cmd,
                   cwd=cwd,
                   stdin=stdin,
                   runas=runas,
                   shell=shell,
                   python_shell=python_shell,
                   env=env,
                   clean_env=clean_env,
                   template=template,
                   umask=umask,
                   output_loglevel=output_loglevel,
                   log_callback=log_callback,
                   timeout=timeout,
                   reset_system_locale=reset_system_locale,
                   ignore_retcode=ignore_retcode,
                   saltenv=saltenv,
                   use_vt=use_vt,
                   password=password,
                   **kwargs)
def script(source,
           args=None,
           cwd=None,
           stdin=None,
           runas=None,
           shell=DEFAULT_SHELL,
           python_shell=None,
           env=None,
           template=None,
           umask=None,
           output_loglevel='debug',
           log_callback=None,
           quiet=False,
           timeout=None,
           reset_system_locale=True,
           saltenv='base',
           use_vt=False,
           bg=False,
           password=None,
           **kwargs):
    '''
    Download a script from a remote location and execute the script locally.
    The script can be located on the salt master file server or on an HTTP/FTP
    server.
    The script will be executed directly, so it can be written in any available
    programming language.
    :param str source: The location of the script to download. If the file is
    located on the master in the directory named spam, and is called eggs, the
    source string is salt://spam/eggs
    :param str args: String of command line args to pass to the script. Only
    used if no args are specified as part of the `name` argument. To pass a
    string containing spaces in YAML, you will need to doubly-quote it:
    "arg1 'arg two' arg3"
    :param str cwd: The current working directory to execute the command in,
    defaults to /root
    :param str stdin: A string of standard input can be specified for the
    command to be run using the ``stdin`` parameter. This can be useful in cases
    where sensitive information must be read from standard input.:
    :param str runas: User to run script as. If running on a Windows minion you
    must also pass a password
    :param str password: Windows only. Required when specifying ``runas``. This
    parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param bool bg: If True, run script in background and do not await or deliver it's results
    :param list env: A list of environment variables to be set prior to
    execution.
    Example:
    .. code-block:: yaml
        salt://scripts/foo.sh:
          cmd.script:
            - env:
              - BATCH: 'yes'
    .. warning::
        The above illustrates a common PyYAML pitfall, that **yes**,
        **no**, **on**, **off**, **true**, and **false** are all loaded as
        boolean ``True`` and ``False`` values, and must be enclosed in
        quotes to be used as strings. More info on this (and other) PyYAML
        idiosyncrasies can be found :doc:`here
        </topics/troubleshooting/yaml_idiosyncrasies>`.
    Variables as values are not evaluated. So $PATH in the following
    example is a literal '$PATH':
    .. code-block:: yaml
        salt://scripts/bar.sh:
          cmd.script:
            - env: "PATH=/some/path:$PATH"
    One can still use the existing $PATH by using a bit of Jinja:
    .. code-block:: yaml
        {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
        mycommand:
          cmd.run:
            - name: ls -l /
            - env:
              - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be logged
    (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param bool quiet: The command will be executed quietly, meaning no log
    entries of the actual command or its return data. This is deprecated as of
    the **2014.1.0** release, and is being replaced with ``output_loglevel: quiet``.
    :param int timeout: If the command has not terminated after timeout seconds,
    send the subprocess sigterm, and if sigterm is ignored, follow up with
    sigkill
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
    more interactively to the console and the logs. This is experimental.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.script salt://scripts/runme.sh
        salt '*' cmd.script salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
        salt '*' cmd.script salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
    .. code-block:: bash
        salt '*' cmd.script salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    # Best-effort removal of the temp script (or temp cwd on Windows);
    # failures are logged rather than raised so cleanup never masks the
    # script's own result.
    def _cleanup_tempfile(path):
        try:
            __salt__['file.remove'](path)
        except (SaltInvocationError, CommandExecutionError) as exc:
            log.error(
                'cmd.script: Unable to clean tempfile \'{0}\': {1}'.format(
                    path,
                    exc
                )
            )
    # '__env__' was replaced by 'saltenv' in 2016.11.0; warn and discard.
    if '__env__' in kwargs:
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'__env__\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        kwargs.pop('__env__')
    # On Windows with runas, run out of a private temp dir and grant the
    # target user read/execute on it so the script is accessible.
    if salt.utils.is_windows() and runas and cwd is None:
        cwd = tempfile.mkdtemp(dir=__opts__['cachedir'])
        __salt__['win_dacl.add_ace'](
            cwd, 'File', runas, 'READ&EXECUTE', 'ALLOW',
            'FOLDER&SUBFOLDERS&FILES')
    # Local destination for the fetched script; keep the source's file
    # extension (matters for e.g. .ps1 on Windows).
    path = salt.utils.files.mkstemp(dir=cwd, suffix=os.path.splitext(source)[1])
    if template:
        if 'pillarenv' in kwargs or 'pillar' in kwargs:
            pillarenv = kwargs.get('pillarenv', __opts__.get('pillarenv'))
            kwargs['pillar'] = _gather_pillar(pillarenv, kwargs.get('pillar'))
        fn_ = __salt__['cp.get_template'](source,
                                          path,
                                          template,
                                          saltenv,
                                          **kwargs)
        if not fn_:
            # Fetch/render failed: clean up and report a cache error.
            if salt.utils.is_windows() and runas:
                _cleanup_tempfile(cwd)
            else:
                _cleanup_tempfile(path)
            return {'pid': 0,
                    'retcode': 1,
                    'stdout': '',
                    'stderr': '',
                    'cache_error': True}
    else:
        fn_ = __salt__['cp.cache_file'](source, saltenv)
        if not fn_:
            # Fetch failed: clean up and report a cache error.
            if salt.utils.is_windows() and runas:
                _cleanup_tempfile(cwd)
            else:
                _cleanup_tempfile(path)
            return {'pid': 0,
                    'retcode': 1,
                    'stdout': '',
                    'stderr': '',
                    'cache_error': True}
        shutil.copyfile(fn_, path)
    if not salt.utils.is_windows():
        os.chmod(path, 320)  # 320 == 0o500: owner read+execute only
        # NOTE(review): when runas is None, this presumably resolves to the
        # current user's uid -- confirm file.user_to_uid(None) semantics.
        os.chown(path, __salt__['file.user_to_uid'](runas), -1)
    ret = _run(path + ' ' + str(args) if args else path,
               cwd=cwd,
               stdin=stdin,
               output_loglevel=output_loglevel,
               log_callback=log_callback,
               runas=runas,
               shell=shell,
               python_shell=python_shell,
               env=env,
               umask=umask,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               saltenv=saltenv,
               use_vt=use_vt,
               bg=bg,
               password=password,
               **kwargs)
    if salt.utils.is_windows() and runas:
        _cleanup_tempfile(cwd)
    else:
        _cleanup_tempfile(path)
    return ret
def script_retcode(source,
                   args=None,
                   cwd=None,
                   stdin=None,
                   runas=None,
                   shell=DEFAULT_SHELL,
                   python_shell=None,
                   env=None,
                   template='jinja',
                   umask=None,
                   timeout=None,
                   reset_system_locale=True,
                   saltenv='base',
                   output_loglevel='debug',
                   log_callback=None,
                   use_vt=False,
                   password=None,
                   **kwargs):
    '''
    Download a script from a remote location and execute the script locally,
    returning only its return code. The script can be located on the salt
    master file server or on an HTTP/FTP server, and may be written in any
    available programming language. The script can also be formatted as a
    template, the default is jinja.
    :param str source: The location of the script to download. If the file is
    located on the master in the directory named spam, and is called eggs, the
    source string is salt://spam/eggs
    :param str args: String of command line args to pass to the script. Only
    used if no args are specified as part of the `name` argument. To pass a
    string containing spaces in YAML, you will need to doubly-quote it: "arg1
    'arg two' arg3"
    :param str cwd: The current working directory to execute the command in,
    defaults to /root
    :param str stdin: A string of standard input can be specified for the
    command to be run using the ``stdin`` parameter. This can be useful in
    cases where sensitive information must be read from standard input.
    :param str runas: User to run script as. If running on a Windows minion
    you must also pass a password
    :param str password: Windows only. Required when specifying ``runas``.
    This parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
    execution. May also be given as a dict, or a YAML string which resolves
    to a dict.
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be
    logged (loglevel: DEBUG) regardless, unless ``quiet`` is used for this
    value.
    :param int timeout: If the command has not terminated after timeout
    seconds, send the subprocess sigterm, and if sigterm is ignored, follow
    up with sigkill
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
    more interactively to the console and the logs. This is experimental.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.script_retcode salt://scripts/runme.sh
        salt '*' cmd.script_retcode salt://scripts/runme.sh 'arg1 arg2 "arg 3"'
        salt '*' cmd.script_retcode salt://scripts/windows_task.ps1 args=' -Input c:\\tmp\\infile.txt' shell='powershell'
    A string of standard input can be specified for the command to be run
    using the ``stdin`` parameter:
    .. code-block:: bash
        salt '*' cmd.script_retcode salt://scripts/runme.sh stdin='one\\ntwo\\nthree\\nfour\\nfive\\n'
    '''
    # '__env__' was replaced by 'saltenv' in 2016.11.0; warn and discard
    # before delegating to script().
    if '__env__' in kwargs:
        salt.utils.warn_until(
            'Oxygen',
            'Parameter \'__env__\' has been detected in the argument list. This '
            'parameter is no longer used and has been replaced by \'saltenv\' '
            'as of Salt 2016.11.0. This warning will be removed in Salt Oxygen.'
        )
        kwargs.pop('__env__')
    # Run the full script() machinery, then discard everything but the
    # return code.
    result = script(source=source,
                    args=args,
                    cwd=cwd,
                    stdin=stdin,
                    runas=runas,
                    shell=shell,
                    python_shell=python_shell,
                    env=env,
                    template=template,
                    umask=umask,
                    timeout=timeout,
                    reset_system_locale=reset_system_locale,
                    saltenv=saltenv,
                    output_loglevel=output_loglevel,
                    log_callback=log_callback,
                    use_vt=use_vt,
                    password=password,
                    **kwargs)
    return result['retcode']
def which(cmd):
    '''
    Return the full path of an executable available on the minion, or
    ``None`` when it cannot be located.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.which cat
    '''
    # Delegate the actual PATH search to the shared salt helper.
    located = salt.utils.which(cmd)
    return located
def which_bin(cmds):
    '''
    Return the first command from ``cmds`` that can be found on this
    minion's executable search path.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.which_bin '[pip2, pip, pip-python]'
    '''
    # salt.utils.which_bin walks the candidates in order and returns the
    # first match (or None).
    first_found = salt.utils.which_bin(cmds)
    return first_found
def has_exec(cmd):
    '''
    Return ``True`` if the executable is available on the minion,
    ``False`` otherwise.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.has_exec cat
    '''
    # Reuse the module's own lookup; a None result means "not found".
    found_path = which(cmd)
    return found_path is not None
def exec_code(lang, code, cwd=None):
    '''
    Pass in two strings, the first naming the executable language, aka -
    python2, python3, ruby, perl, lua, etc. the second string containing
    the code you wish to execute. The stdout will be returned.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.exec_code ruby 'puts "cheese"'
    '''
    # Run via exec_code_all and keep only the captured standard output.
    full_result = exec_code_all(lang, code, cwd)
    return full_result['stdout']
def exec_code_all(lang, code, cwd=None):
    '''
    Pass in two strings, the first naming the executable language, aka -
    python2, python3, ruby, perl, lua, etc. the second string containing
    the code you wish to execute. All cmd artifacts (stdout, stderr, retcode, pid)
    will be returned.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.exec_code_all ruby 'puts "cheese"'
    '''
    # PowerShell refuses to run scripts that do not carry a .ps1 suffix,
    # so pick the temp file name accordingly.
    powershell = lang.lower().startswith("powershell")
    if powershell:
        codefile = salt.utils.files.mkstemp(suffix=".ps1")
    else:
        codefile = salt.utils.files.mkstemp()
    try:
        with salt.utils.fopen(codefile, 'w+t', binary=False) as fp_:
            fp_.write(code)
        if powershell:
            # PowerShell needs -File to execute a script from disk
            cmd = [lang, "-File", codefile]
        else:
            cmd = [lang, codefile]
        return run_all(cmd, cwd=cwd, python_shell=False)
    finally:
        # Always remove the temporary code file, even when run_all raises;
        # the original only cleaned up on the success path, leaking temp
        # files on error.
        os.remove(codefile)
def tty(device, echo=None):
    '''
    Echo a string to a specific tty
    :param str device: Device name, e.g. ``tty0`` or ``pts3``.
    :param str echo: The message to write. Defaults to an empty write.
    :returns: dict with either a ``Success`` or ``Error`` key.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.tty tty0 'This is a test'
        salt '*' cmd.tty pts3 'This is a test'
    '''
    if device.startswith('tty'):
        teletype = '/dev/{0}'.format(device)
    elif device.startswith('pts'):
        # 'pts3' -> '/dev/pts/3'
        teletype = '/dev/{0}'.format(device.replace('pts', 'pts/'))
    else:
        return {'Error': 'The specified device is not a valid TTY'}
    if echo is None:
        # Writing None to the device would raise a TypeError that escapes
        # the IOError handler below; treat a missing message as empty.
        echo = ''
    try:
        with salt.utils.fopen(teletype, 'wb') as tty_device:
            # The device is opened in binary mode, so text input must be
            # converted to bytes before writing (str would raise on py3).
            tty_device.write(salt.utils.to_bytes(echo))
        return {
            'Success': 'Message was successfully echoed to {0}'.format(teletype)
        }
    except IOError:
        return {
            'Error': 'Echoing to {0} returned error'.format(teletype)
        }
def run_chroot(root,
               cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               python_shell=True,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='quiet',
               log_callback=None,
               quiet=False,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               bg=False,
               **kwargs):
    '''
    .. versionadded:: 2014.7.0
    This function runs :mod:`cmd.run_all <salt.modules.cmdmod.run_all>` wrapped
    within a chroot, with dev and proc mounted in the chroot
    root
        Path to the root of the jail to use.
    cmd
        The command to run. ex: 'ls -lart /home'
    cwd
        The current working directory to execute the command in, defaults to
        /root
    stdin
        A string of standard input can be specified for the command to be run using
        the ``stdin`` parameter. This can be useful in cases where sensitive
        information must be read from standard input.:
    runas
        User to run script as.
    shell
        Shell to execute under. Defaults to the system default shell.
    python_shell
        If False, let python handle the positional arguments. Set to True
        to use shell features, such as pipes or redirection
    env
        A list of environment variables to be set prior to execution.
        Example:
        .. code-block:: yaml
            salt://scripts/foo.sh:
              cmd.script:
                - env:
                  - BATCH: 'yes'
        .. warning::
            The above illustrates a common PyYAML pitfall, that **yes**,
            **no**, **on**, **off**, **true**, and **false** are all loaded as
            boolean ``True`` and ``False`` values, and must be enclosed in
            quotes to be used as strings. More info on this (and other) PyYAML
            idiosyncrasies can be found :doc:`here
            </topics/troubleshooting/yaml_idiosyncrasies>`.
        Variables as values are not evaluated. So $PATH in the following
        example is a literal '$PATH':
        .. code-block:: yaml
            salt://scripts/bar.sh:
              cmd.script:
                - env: "PATH=/some/path:$PATH"
        One can still use the existing $PATH by using a bit of Jinja:
        .. code-block:: yaml
            {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
            mycommand:
              cmd.run:
                - name: ls -l /
                - env:
                  - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    clean_env:
        Attempt to clean out all other shell environment variables and set
        only those provided in the 'env' argument to this function.
    template
        If this setting is applied then the named templating engine will be
        used to render the downloaded file. Currently jinja, mako, and wempy
        are supported
    rstrip
        Strip all whitespace off the end of output before it is returned.
    umask
        The umask (in octal) to use when running the command.
    output_loglevel
        Control the loglevel at which the output from the command is logged.
        Note that the command being run will still be logged (loglevel: DEBUG)
        regardless, unless ``quiet`` is used for this value.
    timeout
        A timeout in seconds for the executed process to return.
    use_vt
        Use VT utils (saltstack) to stream the command output more
        interactively to the console and the logs.
        This is experimental.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_chroot /var/lib/lxc/container_name/rootfs 'sh /tmp/bootstrap.sh'
    '''
    # Mount /dev (devtmpfs) and /proc inside the chroot so commands that
    # expect a normal filesystem layout behave correctly.
    __salt__['mount.mount'](
        os.path.join(root, 'dev'),
        'udev',
        fstype='devtmpfs')
    __salt__['mount.mount'](
        os.path.join(root, 'proc'),
        'proc',
        fstype='proc')
    # Execute chroot routine
    # Prefer bash when the jail provides it, otherwise fall back to /bin/sh.
    sh_ = '/bin/sh'
    if os.path.isfile(os.path.join(root, 'bin/bash')):
        sh_ = '/bin/bash'
    if isinstance(cmd, (list, tuple)):
        cmd = ' '.join([str(i) for i in cmd])
    # The user command is shell-quoted and handed to the jail's shell via -c.
    cmd = 'chroot {0} {1} -c {2}'.format(root, sh_, _cmd_quote(cmd))
    # Allow callers to swap in a different executor (e.g. run) via __context__.
    run_func = __context__.pop('cmd.run_chroot.func', run_all)
    ret = run_func(cmd,
                   runas=runas,
                   cwd=cwd,
                   stdin=stdin,
                   shell=shell,
                   python_shell=python_shell,
                   env=env,
                   clean_env=clean_env,
                   template=template,
                   rstrip=rstrip,
                   umask=umask,
                   output_loglevel=output_loglevel,
                   log_callback=log_callback,
                   quiet=quiet,
                   timeout=timeout,
                   reset_system_locale=reset_system_locale,
                   ignore_retcode=ignore_retcode,
                   saltenv=saltenv,
                   pillarenv=kwargs.get('pillarenv'),
                   pillar=kwargs.get('pillar'),
                   use_vt=use_vt,
                   bg=bg)
    # Kill processes running in the chroot
    # Up to six passes: first three with SIGTERM, then SIGKILL, stopping
    # early once no chroot processes remain.
    for i in range(6):
        pids = _chroot_pids(root)
        if not pids:
            break
        for pid in pids:
            # use sig 15 (TERM) for first 3 attempts, then 9 (KILL)
            sig = 15 if i < 3 else 9
            os.kill(pid, sig)
    if _chroot_pids(root):
        log.error('Processes running in chroot could not be killed, '
                  'filesystem will remain mounted')
    # Unmount in reverse order of mounting.
    __salt__['mount.umount'](os.path.join(root, 'proc'))
    __salt__['mount.umount'](os.path.join(root, 'dev'))
    return ret
def _is_valid_shell(shell):
    '''
    Check whether ``shell`` appears in the system's list of valid login
    shells (``/etc/shells``).
    Returns True/False when the list could be consulted, True when it
    exists but cannot be read, and None when it does not exist.
    '''
    # Windows has no /etc/shells; accept anything there.
    if salt.utils.is_windows():
        return True
    shells_path = '/etc/shells'
    if not os.path.exists(shells_path):
        # No known method of determining available shells
        return None
    valid_shells = []
    try:
        with salt.utils.fopen(shells_path, 'r') as fp_:
            for entry in fp_.read().splitlines():
                # Skip comment lines
                if not entry.startswith('#'):
                    valid_shells.append(entry)
    except OSError:
        # Could not read the file; err on the side of acceptance.
        return True
    return shell in valid_shells
def shells():
    '''
    Lists the valid shells on this system via the /etc/shells file
    .. versionadded:: 2015.5.0
    CLI Example::
        salt '*' cmd.shells
    '''
    shells_path = '/etc/shells'
    found = []
    if not os.path.exists(shells_path):
        return found
    try:
        with salt.utils.fopen(shells_path, 'r') as fp_:
            raw_lines = fp_.read().splitlines()
    except OSError:
        log.error("File '{0}' was not found".format(shells_path))
        return found
    for raw in raw_lines:
        entry = raw.strip()
        # Keep only non-empty, non-comment entries
        if entry and not entry.startswith('#'):
            found.append(entry)
    return found
def shell_info(shell, list_modules=False):
    '''
    .. versionadded:: 2016.11.0
    Provides information about a shell or script languages which often use
    ``#!``. The values returned are dependant on the shell or scripting
    languages all return the ``installed``, ``path``, ``version``,
    ``version_raw``
    Args:
        shell (str): Name of the shell. Support shells/script languages include
            bash, cmd, perl, php, powershell, python, ruby and zsh
        list_modules (bool): True to list modules available to the shell.
            Currently only lists powershell modules.
    Returns:
        dict: A dictionary of information about the shell
    .. code-block:: python
        {'version': '<2 or 3 numeric components dot-separated>',
         'version_raw': '<full version string>',
         'path': '<full path to binary>',
         'installed': <True, False or None>,
         '<attribute>': '<attribute value>'}
    .. note::
        - ``installed`` is always returned, if ``None`` or ``False`` also
          returns error and may also return ``stdout`` for diagnostics.
        - ``version`` is for use in determine if a shell/script language has a
          particular feature set, not for package management.
        - The shell must be within the executable search path.
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.shell_info bash
        salt '*' cmd.shell_info powershell
    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    # Map of shell name -> [version-extraction regex, binary, args...].
    # The regex is popped off before the rest is run as a command line.
    regex_shells = {
        'bash': [r'version (\d\S*)', 'bash', '--version'],
        'bash-test-error': [r'versioZ ([-\w.]+)', 'bash', '--version'],  # used to test a error result
        'bash-test-env': [r'(HOME=.*)', 'bash', '-c', 'declare'],  # used to test a error result
        'zsh': [r'^zsh (\d\S*)', 'zsh', '--version'],
        'tcsh': [r'^tcsh (\d\S*)', 'tcsh', '--version'],
        'cmd': [r'Version ([\d.]+)', 'cmd.exe', '/C', 'ver'],
        'powershell': [r'PSVersion\s+(\d\S*)', 'powershell', '-NonInteractive', '$PSVersionTable'],
        'perl': [r'^(\d\S*)', 'perl', '-e', 'printf "%vd\n", $^V;'],
        'python': [r'^Python (\d\S*)', 'python', '-V'],
        'ruby': [r'^ruby (\d\S*)', 'ruby', '-v'],
        'php': [r'^PHP (\d\S*)', 'php', '-v']
    }
    # Ensure ret['installed'] always as a value of True, False or None (not sure)
    ret = {'installed': False}
    if salt.utils.is_windows() and shell == 'powershell':
        # On Windows, PowerShell details come from the registry rather than
        # from running the binary.
        pw_keys = __salt__['reg.list_keys'](
            'HKEY_LOCAL_MACHINE',
            'Software\\Microsoft\\PowerShell')
        pw_keys.sort(key=int)
        if len(pw_keys) == 0:
            return {
                'error': 'Unable to locate \'powershell\' Reason: Cannot be '
                         'found in registry.',
                'installed': False,
            }
        # Iterate versions in ascending order; later (newer) versions
        # overwrite ret below.
        for reg_ver in pw_keys:
            install_data = __salt__['reg.read_value'](
                'HKEY_LOCAL_MACHINE',
                'Software\\Microsoft\\PowerShell\\{0}'.format(reg_ver),
                'Install')
            if 'vtype' in install_data and \
                    install_data['vtype'] == 'REG_DWORD' and \
                    install_data['vdata'] == 1:
                details = __salt__['reg.list_values'](
                    'HKEY_LOCAL_MACHINE',
                    'Software\\Microsoft\\PowerShell\\{0}\\'
                    'PowerShellEngine'.format(reg_ver))
                # reset data, want the newest version details only as powershell
                # is backwards compatible
                ret = {}
                # if all goes well this will become True
                ret['installed'] = None
                ret['path'] = which('powershell.exe')
                for attribute in details:
                    if attribute['vname'].lower() == '(default)':
                        continue
                    elif attribute['vname'].lower() == 'powershellversion':
                        ret['psversion'] = attribute['vdata']
                        ret['version_raw'] = attribute['vdata']
                    elif attribute['vname'].lower() == 'runtimeversion':
                        ret['crlversion'] = attribute['vdata']
                        # Strip the leading 'v' from e.g. 'v4.0.30319'
                        if ret['crlversion'][0].lower() == 'v':
                            ret['crlversion'] = ret['crlversion'][1::]
                    elif attribute['vname'].lower() == 'pscompatibleversion':
                        # reg attribute does not end in s, the powershell
                        # attribute does
                        ret['pscompatibleversions'] = \
                            attribute['vdata'].replace(' ', '').split(',')
                    else:
                        # keys are lower case as python is case sensitive the
                        # registry is not
                        ret[attribute['vname'].lower()] = attribute['vdata']
    else:
        if shell not in regex_shells:
            return {
                'error': 'Salt does not know how to get the version number for '
                         '{0}'.format(shell),
                'installed': None
            }
        shell_data = regex_shells[shell]
        # First element is the version regex; the remainder is the command.
        pattern = shell_data.pop(0)
        # We need to make sure HOME set, so shells work correctly
        # salt-call will general have home set, the salt-minion service may not
        # We need to assume ports of unix shells to windows will look after
        # themselves in setting HOME as they do it in many different ways
        newenv = os.environ
        if ('HOME' not in newenv) and (not salt.utils.is_windows()):
            newenv['HOME'] = os.path.expanduser('~')
            log.debug('HOME environment set to {0}'.format(newenv['HOME']))
        try:
            proc = salt.utils.timed_subprocess.TimedProc(
                shell_data,
                stdin=None,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                timeout=10,
                env=newenv
                )
        except (OSError, IOError) as exc:
            return {
                'error': 'Unable to run command \'{0}\' Reason: {1}'.format(' '.join(shell_data), exc),
                'installed': False,
            }
        try:
            proc.run()
        except TimedProcTimeoutError as exc:
            return {
                'error': 'Unable to run command \'{0}\' Reason: Timed out.'.format(' '.join(shell_data)),
                'installed': False,
            }
        ret['path'] = which(shell_data[0])
        pattern_result = re.search(pattern, proc.stdout, flags=re.IGNORECASE)
        # only set version if we find it, so code later on can deal with it
        if pattern_result:
            ret['version_raw'] = pattern_result.group(1)
    # From here on both the Windows-registry and subprocess paths converge:
    # derive a normalized dotted 'version' from 'version_raw'.
    if 'version_raw' in ret:
        version_results = re.match(r'(\d[\d.]*)', ret['version_raw'])
        if version_results:
            ret['installed'] = True
            # Keep at most three numeric components, padding 'N' to 'N.0'.
            ver_list = version_results.group(1).split('.')[:3]
            if len(ver_list) == 1:
                ver_list.append('0')
            ret['version'] = '.'.join(ver_list[:3])
    else:
        ret['installed'] = None  # Have an unexpected result
    # Get a list of the PowerShell modules which are potentially available
    # to be imported
    if shell == 'powershell' and ret['installed'] and list_modules:
        ret['modules'] = salt.utils.powershell.get_modules()
    if 'version' not in ret:
        ret['error'] = 'The version regex pattern for shell {0}, could not ' \
                       'find the version string'.format(shell)
        ret['stdout'] = proc.stdout  # include stdout so they can see the issue
        log.error(ret['error'])
    return ret
def powershell(cmd,
               cwd=None,
               stdin=None,
               runas=None,
               shell=DEFAULT_SHELL,
               env=None,
               clean_env=False,
               template=None,
               rstrip=True,
               umask=None,
               output_loglevel='debug',
               quiet=False,
               timeout=None,
               reset_system_locale=True,
               ignore_retcode=False,
               saltenv='base',
               use_vt=False,
               password=None,
               depth=None,
               encode_cmd=False,
               **kwargs):
    '''
    Execute the passed PowerShell command and return the output as a dictionary.
    Other ``cmd.*`` functions return the raw text output of the command. This
    function appends ``| ConvertTo-JSON`` to the command and then parses the
    JSON into a Python dictionary. If you want the raw textual result of your
    PowerShell command you should use ``cmd.run`` with the ``shell=powershell``
    option.
    For example:
    .. code-block:: bash
        salt '*' cmd.run '$PSVersionTable.CLRVersion' shell=powershell
        salt '*' cmd.run 'Get-NetTCPConnection' shell=powershell
    .. versionadded:: 2016.3.0
    .. warning::
        This passes the cmd argument directly to PowerShell
        without any further processing! Be absolutely sure that you
        have properly sanitized the command passed to this function
        and do not use untrusted inputs.
    Note that ``env`` represents the environment variables for the command, and
    should be formatted as a dict, or a YAML string which resolves to a dict.
    In addition to the normal ``cmd.run`` parameters, this command offers the
    ``depth`` parameter to change the Windows default depth for the
    ``ConvertTo-JSON`` powershell command. The Windows default is 2. If you need
    more depth, set that here.
    .. note::
        For some commands, setting the depth to a value greater than 4 greatly
        increases the time it takes for the command to return and in many cases
        returns useless data.
    :param str cmd: The powershell command to run.
    :param str cwd: The current working directory to execute the command in
    :param str stdin: A string of standard input can be specified for the
    command to be run using the ``stdin`` parameter. This can be useful in cases
    where sensitive information must be read from standard input.:
    :param str runas: User to run script as. If running on a Windows minion you
    must also pass a password
    :param str password: Windows only. Required when specifying ``runas``. This
    parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
    execution.
    Example:
    .. code-block:: yaml
        salt://scripts/foo.sh:
          cmd.script:
            - env:
              - BATCH: 'yes'
    .. warning::
        The above illustrates a common PyYAML pitfall, that **yes**,
        **no**, **on**, **off**, **true**, and **false** are all loaded as
        boolean ``True`` and ``False`` values, and must be enclosed in
        quotes to be used as strings. More info on this (and other) PyYAML
        idiosyncrasies can be found :doc:`here
        </topics/troubleshooting/yaml_idiosyncrasies>`.
    Variables as values are not evaluated. So $PATH in the following
    example is a literal '$PATH':
    .. code-block:: yaml
        salt://scripts/bar.sh:
          cmd.script:
            - env: "PATH=/some/path:$PATH"
    One can still use the existing $PATH by using a bit of Jinja:
    .. code-block:: yaml
        {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
        mycommand:
          cmd.run:
            - name: ls -l /
            - env:
              - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
    variables and set only those provided in the 'env' argument to this
    function.
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param bool rstrip: Strip all whitespace off the end of output before it is
    returned.
    :param str umask: The umask (in octal) to use when running the command.
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be logged
    (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param int timeout: A timeout in seconds for the executed process to return.
    :param bool use_vt: Use VT utils (saltstack) to stream the command output
    more interactively to the console and the logs. This is experimental.
    :param bool reset_system_locale: Resets the system locale
    :param bool ignore_retcode: Ignore the return code
    :param str saltenv: The salt environment to use. Default is 'base'
    :param int depth: The number of levels of contained objects to be included.
    Default is 2. Values greater than 4 seem to greatly increase the time
    it takes for the command to complete for some commands. eg: ``dir``
    .. versionadded:: 2016.3.4
    :param bool encode_cmd: Encode the command before executing. Use in cases
    where characters may be dropped or incorrectly converted when executed.
    Default is False.
    :returns:
    :dict: A dictionary of data returned by the powershell command.
    CLI Example:
    .. code-block:: powershell
        salt '*' cmd.powershell "$PSVersionTable.CLRVersion"
    '''
    # python_shell is accepted for API compatibility with other cmd.*
    # functions; default to True when not supplied.
    if 'python_shell' in kwargs:
        python_shell = kwargs.pop('python_shell')
    else:
        python_shell = True
    # Append PowerShell Object formatting
    cmd += ' | ConvertTo-JSON'
    if depth is not None:
        cmd += ' -Depth {0}'.format(depth)
    if encode_cmd:
        # Convert the cmd to UTF-16LE without a BOM and base64 encode.
        # Just base64 encoding UTF-8 or including a BOM is not valid.
        log.debug('Encoding PowerShell command \'{0}\''.format(cmd))
        # NOTE(review): str.decode() is a Python 2 idiom; on Python 3 a str
        # has no .decode() and this line would raise AttributeError —
        # confirm this module still targets Python 2 on this code path.
        cmd_utf16 = cmd.decode('utf-8').encode('utf-16le')
        cmd = base64.standard_b64encode(cmd_utf16)
        encoded_cmd = True
    else:
        encoded_cmd = False
    # Retrieve the response, while overriding shell with 'powershell'
    response = run(cmd,
                   cwd=cwd,
                   stdin=stdin,
                   runas=runas,
                   shell='powershell',
                   env=env,
                   clean_env=clean_env,
                   template=template,
                   rstrip=rstrip,
                   umask=umask,
                   output_loglevel=output_loglevel,
                   quiet=quiet,
                   timeout=timeout,
                   reset_system_locale=reset_system_locale,
                   ignore_retcode=ignore_retcode,
                   saltenv=saltenv,
                   use_vt=use_vt,
                   python_shell=python_shell,
                   password=password,
                   encoded_cmd=encoded_cmd,
                   **kwargs)
    try:
        return json.loads(response)
    except Exception:
        # Any parse failure (e.g. non-JSON output from the command) is
        # logged with traceback and an empty dict is returned.
        log.error("Error converting PowerShell JSON return", exc_info=True)
        return {}
def run_bg(cmd,
           cwd=None,
           runas=None,
           shell=DEFAULT_SHELL,
           python_shell=None,
           env=None,
           clean_env=False,
           template=None,
           umask=None,
           timeout=None,
           output_loglevel='debug',
           log_callback=None,
           reset_system_locale=True,
           ignore_retcode=False,
           saltenv='base',
           password=None,
           **kwargs):
    r'''
    .. versionadded: 2016.3.0
    Execute the passed command in the background and return it's PID
    Note that ``env`` represents the environment variables for the command, and
    should be formatted as a dict, or a YAML string which resolves to a dict.
    :param str cmd: The command to run. ex: 'ls -lart /home'
    :param str cwd: The current working directory to execute the command in,
    defaults to `/root` (`C:\` in windows)
    :param str output_loglevel: Control the loglevel at which the output from
    the command is logged. Note that the command being run will still be logged
    (loglevel: DEBUG) regardless, unless ``quiet`` is used for this value.
    :param str runas: User to run script as. If running on a Windows minion you
    must also pass a password
    :param str password: Windows only. Required when specifying ``runas``. This
    parameter will be ignored on non-Windows platforms.
    .. versionadded:: 2016.3.0
    :param str shell: Shell to execute under. Defaults to the system default
    shell.
    :param bool python_shell: If False, let python handle the positional
    arguments. Set to True to use shell features, such as pipes or redirection
    :param list env: A list of environment variables to be set prior to
    execution.
    Example:
    .. code-block:: yaml
        salt://scripts/foo.sh:
          cmd.script:
            - env:
              - BATCH: 'yes'
    .. warning::
        The above illustrates a common PyYAML pitfall, that **yes**,
        **no**, **on**, **off**, **true**, and **false** are all loaded as
        boolean ``True`` and ``False`` values, and must be enclosed in
        quotes to be used as strings. More info on this (and other) PyYAML
        idiosyncrasies can be found :doc:`here
        </topics/troubleshooting/yaml_idiosyncrasies>`.
    Variables as values are not evaluated. So $PATH in the following
    example is a literal '$PATH':
    .. code-block:: yaml
        salt://scripts/bar.sh:
          cmd.script:
            - env: "PATH=/some/path:$PATH"
    One can still use the existing $PATH by using a bit of Jinja:
    .. code-block:: yaml
        {% set current_path = salt['environ.get']('PATH', '/bin:/usr/bin') %}
        mycommand:
          cmd.run:
            - name: ls -l /
            - env:
              - PATH: {{ [current_path, '/my/special/bin']|join(':') }}
    :param bool clean_env: Attempt to clean out all other shell environment
    variables and set only those provided in the 'env' argument to this
    function.
    :param str template: If this setting is applied then the named templating
    engine will be used to render the downloaded file. Currently jinja, mako,
    and wempy are supported
    :param str umask: The umask (in octal) to use when running the command.
    :param int timeout: A timeout in seconds for the executed process to return.
    .. warning::
        This function does not process commands through a shell
        unless the python_shell flag is set to True. This means that any
        shell-specific functionality such as 'echo' or the use of pipes,
        redirection or &&, should either be migrated to cmd.shell or
        have the python_shell=True flag set here.
        The use of python_shell=True means that the shell will accept _any_ input
        including potentially malicious commands such as 'good_command;rm -rf /'.
        Be absolutely certain that you have sanitized your input prior to using
        python_shell=True
    CLI Example:
    .. code-block:: bash
        salt '*' cmd.run_bg "fstrim-all"
    The template arg can be set to 'jinja' or another supported template
    engine to render the command arguments before execution.
    For example:
    .. code-block:: bash
        salt '*' cmd.run_bg template=jinja "ls -l /tmp/{{grains.id}} | awk '/foo/{print \\$2}'"
    Specify an alternate shell with the shell parameter:
    .. code-block:: bash
        salt '*' cmd.run_bg "Get-ChildItem C:\\ " shell='powershell'
    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:
    .. code-block:: bash
        salt '*' cmd.run_bg cmd='ls -lR / | sed -e s/=/:/g > /tmp/dontwait'
    '''
    # Resolve the effective python_shell default based on the publishing jid.
    python_shell = _python_shell_default(python_shell,
                                         kwargs.get('__pub_jid', ''))
    # bg=True with with_communicate=False: the process is spawned and this
    # call returns immediately without collecting stdout/stderr.
    res = _run(cmd,
               stdin=None,
               stderr=None,
               stdout=None,
               output_loglevel=output_loglevel,
               use_vt=None,
               bg=True,
               with_communicate=False,
               rstrip=False,
               runas=runas,
               shell=shell,
               python_shell=python_shell,
               cwd=cwd,
               env=env,
               clean_env=clean_env,
               template=template,
               umask=umask,
               log_callback=log_callback,
               timeout=timeout,
               reset_system_locale=reset_system_locale,
               ignore_retcode=ignore_retcode,
               saltenv=saltenv,
               password=password,
               **kwargs
               )
    # Only the PID of the detached process is returned to the caller.
    return {
        'pid': res['pid']
    }
| 34.379045
| 121
| 0.571683
|
7d34a881609a0792c0bbab90dac82920560b7737
| 4,546
|
py
|
Python
|
tests/plugins/test_server_discovery.py
|
angry-tony/ceph-lcm-decapod
|
535944d3ee384c3a7c4af82f74041b0a7792433f
|
[
"Apache-2.0"
] | 41
|
2016-11-03T16:40:17.000Z
|
2019-05-23T08:39:17.000Z
|
tests/plugins/test_server_discovery.py
|
Mirantis/ceph-lcm
|
fad9bad0b94f2ef608362953583b10a54a841d24
|
[
"Apache-2.0"
] | 30
|
2016-10-14T10:54:46.000Z
|
2017-10-20T15:58:01.000Z
|
tests/plugins/test_server_discovery.py
|
angry-tony/ceph-lcm-decapod
|
535944d3ee384c3a7c4af82f74041b0a7792433f
|
[
"Apache-2.0"
] | 28
|
2016-09-17T01:17:36.000Z
|
2019-07-05T03:32:54.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for server discovery plugin."""
import getopt
import json
import shutil
import unittest.mock
import pytest
from decapod_common import plugins
from decapod_common import process
from decapod_common.models import task
@pytest.fixture
def new_task(configure_model):
    """Create, save and start a server discovery task for the tests."""
    user = pytest.faux.gen_alpha()
    initiator = pytest.faux.gen_uuid()
    server = pytest.faux.gen_uuid()
    discovery_task = task.ServerDiscoveryTask(
        server, "localhost", user, initiator)
    return discovery_task.create().start()
@pytest.yield_fixture
def no_connect():
    """Stub out SSH connection creation so no real connection is opened."""
    target = ("decapod_plugin_playbook_server_discovery."
              "plugin.verbose_create_connection")
    with unittest.mock.patch(target):
        yield
@pytest.yield_fixture
def plugin(no_connect):
    """Yield a server_discovery plugin instance and clean up its tempdir."""
    instance = plugins.get_playbook_plugins()["server_discovery"]()
    yield instance
    if instance.tempdir:
        shutil.rmtree(instance.tempdir, ignore_errors=True)
        instance.tempdir = None
def test_dynamic_inventory(new_task, plugin, monkeypatch):
    """The plugin must build an Ansible inventory from the task data."""
    monkeypatch.setenv(process.ENV_TASK_ID, str(new_task._id))
    host = new_task.data["host"]
    expected = {
        "new": {"hosts": [host]},
        "_meta": {
            "hostvars": {
                host: {"ansible_user": new_task.data["username"]}
            }
        }
    }
    assert plugin.get_dynamic_inventory() == expected
def test_compose_command(new_task, plugin):
    """Verify the ansible command line composed by the plugin.

    Options may be passed either in long or short form; both must point at
    the decapod inventory, the plugin's module and its temp directory.
    """
    plugin.on_pre_execute(new_task)
    plugin.compose_command(new_task)
    cmdline = plugin.proc.commandline
    assert cmdline[0] == shutil.which("ansible")
    opts, args = getopt.getopt(
        cmdline[1:],
        ":m:i:t:",
        ["inventory-file=", "module-name=", "tree=", "become", "one-line"]
    )
    opts = dict(opts)
    assert args == ["new"]
    if "--inventory-file" in opts:
        assert opts["--inventory-file"] == shutil.which("decapod-inventory")
    else:
        assert opts["-i"] == shutil.which("decapod-inventory")
    if "--module-name" in opts:
        assert opts["--module-name"] == plugin.MODULE
    else:
        # getopt returns short options keyed as "-m", not "--m"; the
        # original assertions on opts["--m"] / opts["--t"] could only
        # raise KeyError instead of checking the value.
        assert opts["-m"] == plugin.MODULE
    if "--tree" in opts:
        assert opts["--tree"] == str(plugin.tempdir)
    else:
        assert opts["-t"] == str(plugin.tempdir)
def test_on_pre_execute(new_task, plugin):
    """The pre-execute hook must create a fresh, empty temp directory."""
    assert not plugin.tempdir
    plugin.on_pre_execute(new_task)
    created = plugin.tempdir
    assert created.is_dir()
    assert list(created.iterdir()) == []
def test_on_post_execute_ok(new_task, plugin):
    """A successful run must create a server model from the dumped facts."""
    plugin.on_pre_execute(new_task)
    plugin.compose_command(new_task)
    nodename = pytest.faux.gen_uuid()
    facts = {"ansible_nodename": nodename}
    payload = {"ansible_facts": facts, "changed": False}
    with plugin.tempdir.joinpath("localhost").open("w") as result_file:
        json.dump(payload, result_file)
    srv = plugin.on_post_execute(new_task, None, None, None)
    assert srv._id
    assert srv.model_id
    assert srv.version == 1
    assert srv.facts == facts
    assert srv.username == new_task.data["username"]
    assert srv.name == nodename
    assert srv.fqdn == nodename
    assert srv.ip == "127.0.0.1"
def test_on_post_execute_fail(new_task, plugin, pymongo_connection):
    """No server model must be created when the task finished with an error."""
    pymongo_connection.db.server.remove({})
    plugin.on_pre_execute(new_task)
    plugin.compose_command(new_task)
    facts = {"ansible_nodename": pytest.faux.gen_uuid()}
    payload = {"ansible_facts": facts, "changed": False}
    with plugin.tempdir.joinpath("localhost").open("w") as result_file:
        json.dump(payload, result_file)
    srv = plugin.on_post_execute(new_task, ValueError(), ValueError, None)
    assert not srv
    assert not pymongo_connection.db.server.find({}).count()
| 27.889571
| 76
| 0.658381
|
00518a5424d50237443a7424dd4f287c8d86f230
| 841
|
py
|
Python
|
custom/ilsgateway/migrations/0005_add_pending_reporting_data_recalculation.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
custom/ilsgateway/migrations/0005_add_pending_reporting_data_recalculation.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
custom/ilsgateway/migrations/0005_add_pending_reporting_data_recalculation.py
|
rochakchauhan/commcare-hq
|
aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
    # Requires the initial 'locations' migration (for the SQLLocation FK)
    # and the preceding ilsgateway merge migration.
    dependencies = [
        ('locations', '0001_initial'),
        ('ilsgateway', '0004_merge'),
    ]
    operations = [
        # Creates the table tracking reporting data that still needs to be
        # recalculated for a given domain/location. ``data`` is a JSON
        # field; its schema is not visible here — defined by the callers.
        migrations.CreateModel(
            name='PendingReportingDataRecalculation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('domain', models.CharField(max_length=128)),
                ('type', models.CharField(max_length=128)),
                ('data', jsonfield.fields.JSONField()),
                ('sql_location', models.ForeignKey(to='locations.SQLLocation', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        )
    ]
| 31.148148
| 114
| 0.570749
|
09e8dc288e087bb3d650e7c6242d7c420f71e87b
| 1,073
|
py
|
Python
|
mtp_api/apps/prison/urls.py
|
uk-gov-mirror/ministryofjustice.money-to-prisoners-api
|
fdf74298284804779e95294cf418ce97e5ea8666
|
[
"MIT"
] | null | null | null |
mtp_api/apps/prison/urls.py
|
uk-gov-mirror/ministryofjustice.money-to-prisoners-api
|
fdf74298284804779e95294cf418ce97e5ea8666
|
[
"MIT"
] | null | null | null |
mtp_api/apps/prison/urls.py
|
uk-gov-mirror/ministryofjustice.money-to-prisoners-api
|
fdf74298284804779e95294cf418ce97e5ea8666
|
[
"MIT"
] | null | null | null |
from django.conf.urls import include, url
from rest_framework import routers
from prison import views
# ViewSet routes registered on the DRF default router.
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+; this
# spelling implies the project pins an older Django REST framework.
router = routers.DefaultRouter()
router.register(r'prisoner_locations', views.PrisonerLocationView, base_name='prisonerlocation')
router.register(r'prisoner_validity', views.PrisonerValidityView, base_name='prisoner_validity')
router.register(r'prisoner_account_balances', views.PrisonerAccountBalanceView, base_name='prisoner_account_balance')
router.register(r'prisons', views.PrisonView, base_name='prison')
router.register(r'prison_populations', views.PopulationView, base_name='prison_population')
router.register(r'prison_categories', views.CategoryView, base_name='prison_category')
urlpatterns = [
    url(r'^', include(router.urls)),
    # Maintenance actions that are plain APIViews, mounted beside the
    # router-generated prisoner_locations routes.
    url(r'^prisoner_locations/actions/delete_old/$',
        views.DeleteOldPrisonerLocationsView.as_view(),
        name='prisonerlocation-delete-old'),
    url(r'^prisoner_locations/actions/delete_inactive/$',
        views.DeleteInactivePrisonerLocationsView.as_view(),
        name='prisonerlocation-delete-inactive'),
]
| 46.652174
| 117
| 0.794035
|
e68669581065c08095f05d35b3157e902c8c44a1
| 4,661
|
py
|
Python
|
tern/formats/spdx/spdxtagvalue/image_helpers.py
|
the-wright-engineer/tern
|
8716e12eba85d36e566f0d4ec25a7ed9d4b1b51f
|
[
"BSD-2-Clause"
] | null | null | null |
tern/formats/spdx/spdxtagvalue/image_helpers.py
|
the-wright-engineer/tern
|
8716e12eba85d36e566f0d4ec25a7ed9d4b1b51f
|
[
"BSD-2-Clause"
] | null | null | null |
tern/formats/spdx/spdxtagvalue/image_helpers.py
|
the-wright-engineer/tern
|
8716e12eba85d36e566f0d4ec25a7ed9d4b1b51f
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
Helper functions for image level SPDX document blocks
Images for SPDX act like a Package
"""
from tern.formats.spdx import formats as spdx_formats
from tern.formats.spdx.spdxtagvalue import layer_helpers as lhelpers
from tern.formats.spdx.spdxtagvalue import package_helpers as phelpers
def get_image_spdxref(image_obj):
    '''Return the SPDX document reference ID for the given image object.'''
    # The human readable id carries the image name, tag and id.
    human_id = image_obj.get_human_readable_id()
    return 'SPDXRef-{}'.format(human_id)
def get_image_layer_relationships(image_obj):
    '''Return the CONTAINS relationship lines linking the image to each
    of its layers, one line per layer.'''
    image_ref = get_image_spdxref(image_obj)
    lines = []
    for layer in image_obj.layers:
        layer_ref = lhelpers.get_layer_spdxref(layer)
        lines.append(
            spdx_formats.contains.format(outer=image_ref, inner=layer_ref) + '\n')
    return ''.join(lines)
def get_image_packages_block(image_obj, template):
    '''Given the image object and its template, return the list of packages
    in the image in SPDX format. The spec requires unique package references,
    so each package is emitted only once even if it appears in many layers.'''
    seen_refs = set()
    chunks = []
    for layer in image_obj.layers:
        for package in layer.packages:
            ref = phelpers.get_package_spdxref(package)
            if ref in seen_refs:
                continue
            seen_refs.add(ref)
            chunks.append(phelpers.get_package_block(package, template) + '\n')
    return ''.join(chunks)
def get_image_packages_license_block(image_obj):
    '''Given the image object, get all of the licenses found for packages
    in the image. The SPDX spec requires that each license reference be
    unique for the document.'''
    block = ''
    licenses = set()
    for layer in image_obj.layers:
        for package in layer.packages:
            if package.pkg_license:
                licenses.add(package.pkg_license)
    # Iterate in sorted order so the emitted document is deterministic;
    # plain set iteration order varies from run to run, which made
    # otherwise-identical SPDX documents differ.
    for lic in sorted(licenses):
        block += spdx_formats.license_id.format(
            license_ref=phelpers.get_package_license_ref(lic)) + '\n'
        block += spdx_formats.extracted_text.format(orig_license=lic) + '\n'
    return block
def get_image_block(image_obj, template):
    '''Given an image object and the template object for SPDX, return the
    SPDX document block for the given image. For SPDX, the image is a package
    and hence follows the spec for packages.
    The mapping for images should have these keys:
        PackageName
        PackageVersion
        PackageDownloadLocation'''
    block = ''
    mapping = image_obj.to_dict(template)
    # Package Name
    block += 'PackageName: {}\n'.format(mapping['PackageName'])
    # Package SPDXID
    block += 'SPDXID: {}\n'.format(get_image_spdxref(image_obj))
    # Package Version
    block += 'PackageVersion: {}\n'.format(mapping['PackageVersion'])
    # Package Download Location
    block += 'PackageDownloadLocation: {}\n'.format(
        mapping['PackageDownloadLocation'])
    # Files Analyzed (always false)
    block += 'FilesAnalyzed: false\n'
    # Concluded Package License (always NOASSERTION)
    block += 'PackageLicenseConcluded: NOASSERTION\n'
    # Declared Package License (always NOASSERTION)
    block += 'PackageLicenseDeclared: NOASSERTION\n'
    # Package Copyright Text (always NOASSERTION)
    block += 'PackageCopyrightText: NOASSERTION\n'
    # blank line
    block += '\n'
    # Since files are not analyzed within the image we move to relationships
    block += get_image_layer_relationships(image_obj) + '\n'
    # blank line
    block += '\n'
    # Describe each layer 'package' that the image contains
    for index, layer in enumerate(image_obj.layers):
        block += lhelpers.get_layer_block(
            layer, template, mapping['PackageDownloadLocation']) + '\n'
        # print layer relationship to previous layer if there is one
        # (image_obj.layers[index] is the same object as `layer`)
        if index != 0:
            block += lhelpers.get_layer_prereq(
                image_obj.layers[index], image_obj.layers[index - 1]) + '\n'
        # if the layer has packages, print out the relationships
        if layer.packages:
            block += lhelpers.get_layer_package_relationships(layer) + '\n'
    # print out all the packages if they are known
    pkg_block = get_image_packages_block(image_obj, template)
    if pkg_block:
        # add a blank line before adding the package block
        # NOTE(review): no extra '\n' is actually prepended here — comment
        # and code disagree; confirm the intended output format.
        block += pkg_block + '\n'
    # print out the license block for packages
    block += get_image_packages_license_block(image_obj)
    return block
| 40.181034
| 77
| 0.687621
|
14be368a589f2426fa54cd166d7903cdd0aef1a8
| 5,911
|
py
|
Python
|
blueprint/qml_modules_indexing/no2_all_qml_types.py
|
Likianta/pyml
|
b0005b36aa94958a7d3e306a9df65fea46669d18
|
[
"MIT"
] | 1
|
2021-04-11T00:32:52.000Z
|
2021-04-11T00:32:52.000Z
|
blueprint/qml_modules_indexing/no2_all_qml_types.py
|
Likianta/pyml
|
b0005b36aa94958a7d3e306a9df65fea46669d18
|
[
"MIT"
] | null | null | null |
blueprint/qml_modules_indexing/no2_all_qml_types.py
|
Likianta/pyml
|
b0005b36aa94958a7d3e306a9df65fea46669d18
|
[
"MIT"
] | null | null | null |
"""
Requirements:
如需运行本模块, 请先安装 Qt 5.0+ (推荐 5.15) 完整版.
本模块所用到的离线文件读取自:
"{YourQtProgram}/Docs/Qt-{version}/qtdoc/qmltypes.html".
"""
import re
from collections import defaultdict
from bs4 import BeautifulSoup
from lk_logger import lk
from lk_utils import read_and_write
def main(file_i, file_o):
    """Index every QML type listed in the offline Qt documentation.

    Args:
        file_i: '~/blueprint/resources/no2_all_qml_types.html'. Copied
            beforehand from
            "{YourQtProgram}/Docs/Qt-{version}/qtdoc/qmltypes.html".
        file_o: Output file, "~/blueprint/resources/no3_all_qml_types.json":
            {module_group: {module: {type_name: path}, ...}, ...}
            e.g. {
                'qtquick': {
                    'qtquick': {
                        'Rectangle': 'qtquick/qml-qtquick-rectangle.html',
                        'Text': 'qtquick/qml-qtquick-text.html',
                        ...
                    },
                    'qtquick-window': {
                        'Window': 'qtquick/qml-qtquick-window-window.html',
                        ...
                    },
                    ...
                },
                ...
            }

    Approach:
        1. A full Qt install ships its API docs under 'Docs/Qt-{version}'.
        2. "qtdoc/qmltypes.html" there lists every QML type.
        3. Parse that page with BeautifulSoup and collect, for each type,
           its module group, module, type name and relative doc path.
        4. Dump the collected mapping as JSON.
    """
    soup = BeautifulSoup(read_and_write.read_file(file_i), 'html.parser')
    # https://www.itranslater.com/qa/details/2325827141935563776
    data = defaultdict(lambda: defaultdict(dict))
    # {module_group: {module: {type_name: filename, ...}, ...}, ...}
    container = soup.find('div', 'flowListDiv')
    for e in container.find_all('dd'):
        link = e.a['href']  # type: str
        # e.g. "../qtdatavisualization/qml-qtdatavisualization-
        #      abstract3dseries.html"
        match = re.search(r'\.\./(\w+)/([-\w]+)\.html', link)
        # match.group(0): '../qtdatavisualization/qml-qtdatavisualization
        #                 -abstract3dseries.html'
        # match.group(1): 'qtdatavisualization'
        # match.group(2): 'qml-qtdatavisualization-abstract3dseries'
        assert match, e
        module_group = match.group(1)
        module = match.group(2)
        # Skip the legacy QtQuick Controls 1 docs; see
        # `no1_all_qml_modules.py` (handling of QtQuick Controls) for why.
        if module_group == 'qtquickcontrols1':
            continue
        if 'qtquick-controls2' in module:
            # e.g. 'qml-qtquick-controls2-label'
            module = module.replace('controls2', 'controls')
        # BUGFIX: the original used `match.group(0).lstrip('../')`.
        # str.lstrip strips a *character set* ('.', '/'), not a prefix
        # string, so it could eat leading path characters; slice off the
        # literal '../' prefix instead.
        path = match.group(0)[len('../'):]
        # -> 'qtdatavisualization/qml-qtdatavisualization-abstract3dseries
        #    .html'
        module_group = _correct_module_lettercase(module_group)
        # 'qtdatavisualization' -> 'QtDataVisualization'
        module = _correct_module_lettercase('-'.join(module.split('-')[1:-1]))
        # Drop the leading 'qml' token and the trailing type-name token,
        # keeping only the module part:
        #   eg1: 'qml-qtdatavisualization-abstract3dseries'
        #        -> 'qtdatavisualization' -> 'QtDataVisualization'
        #   eg2: 'qml-qt3d-input-abstractactioninput'
        #        -> 'qt3d-input' -> 'Qt3D.Input'
        type_name = e.text.split(':', 1)[0]
        # Take the type name from the element text rather than the URL:
        # `_correct_module_lettercase` only covers module-group/module
        # names, so it cannot reliably fix the case of type names, while
        # the HTML element already carries the correct spelling.
        # e.g. 'RadioButton: QtQuickControls' -> 'RadioButton'
        lk.loga(module_group, module, type_name)
        data[module_group][module][type_name] = path
    read_and_write.dumps(data, file_o)
# ------------------------------------------------------------------------------
# Lookup table mapping lowercased module names to their properly cased
# forms, built from the no1 stage's output. Note: the `|` dict merge
# requires Python 3.9+.
qml_modules = read_and_write.loads('../resources/no2_all_qml_modules.json')
qml_modules = qml_modules['module_group'] | qml_modules['module']  # type: dict
qml_modules.update({  # extra entries not present in the harvested data
    ''                        : '',
    'qtquick-controls-private': 'QtQuick.Controls.Private',
    'mediaplayer-qml'         : 'QtMediaPlayer',
    # Note: this one does not actually exist; added only to avoid KeyError.
})
def _correct_module_lettercase(module: str):
    """Map a lowercased module name to its properly cased form.

    Examples:
        'qtquick-window'     -> 'QtQuick.Window'
        'qtgraphicaleffects' -> 'QtGraphicalEffects'
        'qt-labs-animation'  -> 'Qt.labs.animation'  (special case: all lower)

    Rather than splitting names into words at run time (a third-party
    tokenizer would be heavy, incomplete for Qt-specific words, and
    ambiguous), the module-level `qml_modules` table — prepared by
    `blueprint/qml_indexing/no1_all_qml_modules.py` from
    "{YourQtProgram}/Docs/Qt-{version}/qtdoc/modules-qml.html" — already
    holds every correctly cased name, so a plain lookup suffices.
    """
    # A `global` declaration is unnecessary for read-only access.
    return qml_modules[module]
if __name__ == '__main__':
    # Input/output paths are relative to this script's directory.
    main('../resources/no3_all_qml_types.html',
         '../resources/no4_all_qml_types.json')
| 37.649682
| 80
| 0.554221
|
fb24f19035b1e7752dc77b39e88ff03c37f038cc
| 1,810
|
py
|
Python
|
basis/namedtuple.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
basis/namedtuple.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
basis/namedtuple.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : Max_Pengjb
@ date : 2018/9/20
@ IDE : PyCharm
@ Site :
-------------------------------------------------
"""
__author__ = 'Max_Pengjb'
import collections
# The named 2-D point type, defined once at module level. The original
# re-created the namedtuple class on every CreatePoint() call and carried
# four commented-out duplicate definitions, now removed.
Point = collections.namedtuple('Point', 'x y')


def CreatePoint():
    """Return a new Point named tuple initialised to (x=2, y=2)."""
    return Point(x=2, y=2)
def IncX(p):
    """Return a copy of point *p* with its x coordinate increased by one."""
    bumped = p.x + 1
    return p._replace(x=bumped)
def IncY(p):
    """Return a copy of point *p* with its y coordinate increased by one."""
    bumped = p.y + 1
    return p._replace(y=bumped)
#
def PrintPoint(p):
    """Print *p* in the fixed "当前位置:x = X,y = Y" format."""
    template = "当前位置:x = {},y = {}"
    print(template.format(p.x, p.y))
# Demo 1: immutable point updates via namedtuple._replace.
p = CreatePoint()
PrintPoint(p)
p = IncX(p)
PrintPoint(p)
p = IncY(p)
PrintPoint(p)
# Demo 2: Counter — frequency counting, arithmetic and iteration.
c = collections.Counter("aaaaabbbbbccccccc")
print(c["c"])
print(c.most_common(2))
c2 = collections.Counter(a=2, b=2, c=1, d=1)
print(c2['a'])
c2['a'] = 10
print(c2['a'])
print(c)
print(c2)
print(c + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2 + c2)
print(c - c2)
for k, v in c2.items():
    print(k, v)
print(list(c2.elements()))
c3 = collections.Counter()
print(c3)
c4 = collections.Counter('which')
print(c4)
c4.subtract('witch')  # subtract elements from another iterable
print(c4)
c4.subtract(collections.Counter('watch'))  # subtract elements from another counter
print(c4)
# Demo 3: deque — O(1) appends at both ends.
de = collections.deque()
for i in range(0, 10, 2):
    de.append(i)
    de.appendleft(i+1)
print(list(de))
| 22.625
| 98
| 0.574586
|
e0ccd98c7e2464e4b2ab68ac7684e8879ef2e89e
| 1,504
|
py
|
Python
|
smartphones_parse/spiders/fora_kz.py
|
Vital77766688/smartphones_parse
|
a768d7e7f9605b3a4cc3c5eaaaeecfd5ec7610a3
|
[
"MIT"
] | null | null | null |
smartphones_parse/spiders/fora_kz.py
|
Vital77766688/smartphones_parse
|
a768d7e7f9605b3a4cc3c5eaaaeecfd5ec7610a3
|
[
"MIT"
] | null | null | null |
smartphones_parse/spiders/fora_kz.py
|
Vital77766688/smartphones_parse
|
a768d7e7f9605b3a4cc3c5eaaaeecfd5ec7610a3
|
[
"MIT"
] | null | null | null |
import scrapy
from ..utils import clean_price, clean_title
class ForaKzSpider(scrapy.Spider):
    """Scrape smartphone product pages from fora.kz (Almaty catalog)."""
    name = 'fora.kz'
    def start_requests(self, **kwargs):
        # Entry point: the smartphone catalog listing for Almaty.
        url = 'https://fora.kz/catalog/smartfony-plansety/smartfony/almaty'
        yield scrapy.Request(url=url, callback=self.parse_pages)
    def parse_pages(self, response, **kwargs):
        """Follow every product link on a listing page, then paginate."""
        for item in response.css('div.catalog-list-item'):
            # Skip advertisement banners injected into the product list.
            if item.css('.injectable-banner'):
                continue
            url = response.urljoin(item.css('div.item-info a::attr(href)').get())
            yield scrapy.Request(url=url, callback=self.parse_details)
        # NOTE(review): assumes the last pagination link is "next";
        # getall()[-1] raises IndexError when no pagination exists — confirm.
        next_page = response.css('ul.pagination li a::attr(href)').getall()[-1]
        if next_page:
            yield response.follow(next_page, callback=self.parse_pages)
    def parse_details(self, response, **kwargs):
        """Yield one item dict: shop, url, title, price, images, specs."""
        data = {
            'shop': 'fora.kz',
            'url': response.request.url,
            'title': clean_title(
                response.css('h1[itemprop="name"]::text').get(),''
            ),
            'price': clean_price(
                response.css('div.price span[itemprop="price"]').get(),
                response.request.url
            ),
            'images': response.css('div#product-gallery div#thumbs img::attr(src)').getall(),
            'specs': self.get_specs(response),
        }
        yield data
    def get_specs(self, response):
        """Collect the specification tables into {category: {name: value}}."""
        data = {}
        for item in response.css('div.specifications-panel'):
            spec_category = item.css('h4::text').get().strip()
            data[spec_category] = {}
            for spec in item.css('table tr'):
                data[spec_category][spec.css('th::text').get().strip()] = spec.css('td::text').get().strip()
        return data
| 32
| 96
| 0.680851
|
9c691352d031fa60a02eaa54d8993f0d8f24c674
| 1,229
|
py
|
Python
|
nova/virt/powervm/constants.py
|
NetApp/nova
|
ca490d48a762a423449c654d5a7caeadecf2f6ca
|
[
"Apache-2.0"
] | 2
|
2015-11-05T04:52:34.000Z
|
2016-03-07T03:00:06.000Z
|
nova/virt/powervm/constants.py
|
NetApp/nova
|
ca490d48a762a423449c654d5a7caeadecf2f6ca
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/powervm/constants.py
|
NetApp/nova
|
ca490d48a762a423449c654d5a7caeadecf2f6ca
|
[
"Apache-2.0"
] | 1
|
2020-07-24T08:14:24.000Z
|
2020-07-24T08:14:24.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import power_state
# LPAR state strings as reported by the PowerVM platform.
POWERVM_NOSTATE = ''
POWERVM_RUNNING = 'Running'
POWERVM_STARTING = 'Starting'
POWERVM_SHUTDOWN = 'Not Activated'
# Map each PowerVM state onto the corresponding Nova power state.
# Note that a 'Starting' LPAR is already reported to Nova as RUNNING.
POWERVM_POWER_STATE = {
    POWERVM_NOSTATE: power_state.NOSTATE,
    POWERVM_RUNNING: power_state.RUNNING,
    POWERVM_SHUTDOWN: power_state.SHUTDOWN,
    POWERVM_STARTING: power_state.RUNNING
}
# CPU info tuple; '3940' is presumably a model/clock figure — TODO confirm.
POWERVM_CPU_INFO = ('ppc64', 'powervm', '3940')
POWERVM_HYPERVISOR_TYPE = 'powervm'
POWERVM_HYPERVISOR_VERSION = '7.1'
# Instance sizing limits (root disk presumably in GB, memory in MB — confirm).
POWERVM_MIN_ROOT_GB = 10
POWERVM_MIN_MEM = 512
POWERVM_MAX_MEM = 1024
POWERVM_MAX_CPUS = 1
POWERVM_MIN_CPUS = 1
| 30.725
| 78
| 0.754272
|
af4380285555262bab398e9f8878118ea0a4a6a1
| 1,470
|
py
|
Python
|
scripts/qcomp2020_generate_invocations.py
|
MKlauck/qcomp2020
|
25bc4295754bec07e6b91779ff9d9b34e3bf18dc
|
[
"CC-BY-4.0"
] | 3
|
2020-08-04T05:59:41.000Z
|
2021-04-18T16:55:53.000Z
|
scripts/qcomp2020_generate_invocations.py
|
MKlauck/qcomp2020
|
25bc4295754bec07e6b91779ff9d9b34e3bf18dc
|
[
"CC-BY-4.0"
] | null | null | null |
scripts/qcomp2020_generate_invocations.py
|
MKlauck/qcomp2020
|
25bc4295754bec07e6b91779ff9d9b34e3bf18dc
|
[
"CC-BY-4.0"
] | null | null | null |
from benchmark import *
from utility import *
from invocation import Invocation
import tool
"""
This script creates a list of invocations for the QCOMP 2020 benchmarks.
The invocations can be executed via 'execute_invocations.py'
"""
# NOTE(review): this module-level list is never written to or read here.
qcomp_benchmarks = []
if __name__ == "__main__":
    # Load the benchmark list shipped next to this script.
    benchmarks = load_csv(os.path.join(sys.path[0], "qcomp2020_benchmarks.csv"))
    progressbar = Progressbar(len(benchmarks), "Generating invocations for benchmarks")
    invocations = []
    num_b = 0
    for benchmark_csv in benchmarks:
        # Columns 0/3/4 form the benchmark id "<model>.<instance>.<property>"
        # — presumably; confirm against qcomp2020_benchmarks.csv layout.
        b = get_benchmark_from_id(settings,"{}.{}.{}".format(benchmark_csv[0], benchmark_csv[3], benchmark_csv[4]))
        num_b = num_b + 1
        progressbar.print_progress(num_b)
        b.check_validity()
        invocations_b = tool.get_invocations(b)
        if invocations_b is not None:
            # A tool may return a single Invocation; normalise to a list.
            if isinstance(invocations_b, Invocation):
                invocations_b = [invocations_b]
            # if len(invocations_b) > 2:
            #     print("Found more than two invocations for benchmark {}. This is not allowed in QCOMP2019.".format(b.get_identifier()))
            for i in invocations_b:
                # Keep "benchmark-id" as the first key in the emitted JSON.
                i_json = OrderedDict([("benchmark-id", b.get_identifier())])
                i_json.update(i.to_json())
                invocations.append(i_json)
    save_json(invocations, settings.invocations_filename())
    print("\nSaved {} invocations to file '{}'".format(len(invocations), settings.invocations_filename()))
| 43.235294
| 137
| 0.670748
|
71bb94a6bb8e484cea8144b56b3b38a79797a25a
| 1,482
|
py
|
Python
|
semtk3/runtimeconstraint.py
|
ge-semtk/semtk-python3
|
d6d8e749121ec9309b47d17b01bdcd98935f7e70
|
[
"Apache-2.0"
] | 1
|
2020-04-30T16:48:05.000Z
|
2020-04-30T16:48:05.000Z
|
semtk3/runtimeconstraint.py
|
ge-semtk/semtk-python3
|
d6d8e749121ec9309b47d17b01bdcd98935f7e70
|
[
"Apache-2.0"
] | 7
|
2020-07-09T18:31:41.000Z
|
2021-09-02T12:23:36.000Z
|
semtk3/runtimeconstraint.py
|
ge-semtk/semtk-python3
|
d6d8e749121ec9309b47d17b01bdcd98935f7e70
|
[
"Apache-2.0"
] | 2
|
2020-07-17T22:35:41.000Z
|
2020-07-28T16:16:09.000Z
|
#
# Copyright 2019-20 General Electric Company
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
class RuntimeConstraint():
    """A single runtime constraint on a SemTK nodegroup query: a SPARQL id,
    a comparison operator, and the operand values it is compared against."""
    OP_MATCHES = "MATCHES"
    OP_NOTMATCHES = "NOTMATCHES"
    OP_REGEX = "REGEX"
    OP_GREATERTHAN = "GREATERTHAN"
    OP_GREATERTHANOREQUALS = "GREATERTHANOREQUALS"
    OP_LESSTHAN = "LESSTHAN"
    OP_LESSTHANOREQUALS = "LESSTHANOREQUALS"
    OP_VALUEBETWEEN = "VALUEBETWEEN"
    OP_VALUEBETWEENUNINCLUSIVE = "VALUEBETWEENUNINCLUSIVE"
    def __init__(self, sparqlId, operator, operand_list):
        """Store the constraint's target id, operator and operands."""
        self.sparqlId = sparqlId
        self.operator = operator
        self.operand_list = operand_list
    def to_json(self):
        """Serialise the constraint to the JSON shape the REST API expects."""
        payload = {
            "SparqlID": self.sparqlId,
            "Operator": self.operator,
            "Operands": self.operand_list,
        }
        return json.dumps(payload)
| 32.933333
| 81
| 0.688259
|
a468cedf04cdb15aaed12d08da21a4790a36787d
| 120
|
py
|
Python
|
thonny.py
|
dotimothy/TPIOPi
|
13a68ca88f5a71f1d6984c5f6c3c84d107b12fd8
|
[
"MIT"
] | 3
|
2020-09-24T22:33:20.000Z
|
2021-08-29T06:08:33.000Z
|
thonny.py
|
dotimothy/TPIOPi
|
13a68ca88f5a71f1d6984c5f6c3c84d107b12fd8
|
[
"MIT"
] | null | null | null |
thonny.py
|
dotimothy/TPIOPi
|
13a68ca88f5a71f1d6984c5f6c3c84d107b12fd8
|
[
"MIT"
] | null | null | null |
def factorial(n):
    """Return n! for a non-negative integer n.

    Iterative instead of the original recursion, which (a) recursed
    forever for negative n and (b) risked RecursionError for large n.
    Raises ValueError for negative input.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result


print(factorial(100))
| 20
| 35
| 0.525
|
9c8071342a90515a217f70edb243cca401b2df1e
| 3,171
|
py
|
Python
|
permpy/avclass.py
|
cheyneh/permpy
|
ceff68d7b4b3189506ef8c15d5bcc078f0cd1aa2
|
[
"MIT"
] | 7
|
2015-02-24T22:05:36.000Z
|
2022-01-27T12:22:38.000Z
|
permpy/avclass.py
|
cheyneh/permpy
|
ceff68d7b4b3189506ef8c15d5bcc078f0cd1aa2
|
[
"MIT"
] | 2
|
2019-08-19T21:28:26.000Z
|
2021-03-14T22:00:26.000Z
|
permpy/avclass.py
|
cheyneh/permpy
|
ceff68d7b4b3189506ef8c15d5bcc078f0cd1aa2
|
[
"MIT"
] | 7
|
2015-09-20T20:08:43.000Z
|
2022-01-27T12:22:43.000Z
|
from math import factorial
import logging
import sys
import types
from .permutation import Permutation
from .permset import PermSet
from .permclass import PermClass
# from .propertyclass import PropertyClass
class AvClass(PermClass):
    """An object representing an avoidance class.
    Notes:
        Does not contain the empty permutation.
    Examples:
        >>> B = [123]
        >>> A = AvClass(B, max_len=4)
        >>> for S in A:
        ...     print(S)
        ...
        Set of 1 permutations
        Set of 1 permutations
        Set of 2 permutations
        Set of 5 permutations
        Set of 14 permutations
    """
    def __init__(self, basis, max_len=8):
        """Build Av(basis), enumerating permutations up to length `max_len`."""
        if isinstance(basis, Permutation):
            basis = [basis]
        else:
            basis = [Permutation(b) for b in basis]
        C = [PermSet(Permutation())]  # List consisting of just the PermSet containing the empty Permutation
        if max_len > 0:
            if Permutation(1) not in basis:
                C.append(PermSet(Permutation(1)))
            else:
                C.append(PermSet())
            for length in range(max_len-1):
                C.append(C[-1].right_extensions(basis=basis, trust=True))
        super().__init__(C)
        self.basis = basis
    def extend_by_one(self, trust=True):
        """Extend `self` by right-extending its ultimate PermSet.
        Args:
            trust (bool): Whether or not we can trust the insertion values of
                the ultimate PermSet. In this context, we generally can.
        """
        logging.debug(f"Calling extend_by_one({self}, trust={trust})")
        self.append(self[-1].right_extensions(basis=self.basis, trust=trust))
    def extend_to_length(self, length, trust=True):
        """Extend the class until it contains permutations of `length`."""
        # self.max_len is presumably maintained by PermClass — TODO confirm.
        for _ in range(self.max_len+1, length+1):
            self.extend_by_one(trust=trust)
    def extend_by_length(self, length, trust=True):
        """Extend the class by `length` additional lengths."""
        for _ in range(length):
            self.extend_by_one(trust=trust)
    def right_juxtaposition(self, C, generate_perms=True):
        """Return the class whose permutations decompose as one from `self`
        followed (horizontally) by one from `C`, via its computed basis."""
        A = PermSet()
        max_length = max([len(P) for P in self.basis]) + max([len(P) for P in C.basis])
        for n in range(2, max_length+1):
            for i in range(0, factorial(n)):
                P = Permutation(i,n)
                for Q in self.basis:
                    for R in C.basis:
                        if len(Q) + len(R) == n:
                            if (Q == Permutation(P[0:len(Q)]) and R == Permutation(P[len(Q):n])):
                                A.add(P)
                        elif len(Q) + len(R) - 1 == n:
                            if (Q == Permutation(P[0:len(Q)]) and Permutation(R) == Permutation(P[len(Q)-1:n])):
                                A.add(P)
        # BUGFIX: the original passed `length=...`, but __init__ has no such
        # parameter (it is `max_len`), so this call always raised TypeError.
        return AvClass(list(A.minimal_elements()), max_len=(8 if generate_perms else 0))
    def above_juxtaposition(self, C, generate_perms=True):
        """Return the vertical juxtaposition class, computed by inverting,
        juxtaposing horizontally, and inverting the resulting basis back."""
        inverse_class = AvClass([P.inverse() for P in C.basis], 0)
        horizontal_juxtaposition = self.right_juxtaposition(inverse_class, generate_perms=False)
        # BUGFIX: `length=` -> `max_len=` (same keyword bug as above).
        return AvClass([B.inverse() for B in horizontal_juxtaposition.basis], max_len=(8 if generate_perms else 0))
    def contains(self, other):
        """Check if `self` contains `other` as a permutation class using their bases.
        """
        for p in self.basis:
            for q in other.basis:
                if p in q:
                    break
            else:
                # If we're here, then `p` is not involved in any of the basis elements of `q`, so
                # the permutation `p` lies in `other` but not `self`.
                return False
        return True
if __name__ == "__main__":
    # Small demo: print the size of Av(123) at each length up to 12.
    print()
    demo_basis = [123]
    demo_class = AvClass(demo_basis, 12)
    for perm_set in demo_class:
        print(perm_set)
| 28.567568
| 108
| 0.673605
|
015a2698755efc8b8702b3e3d9d92d0679cfdf8a
| 9,268
|
py
|
Python
|
naturalnets/tools/episode_runner_autoencoder.py
|
bjuergens/NaturalNets
|
fd67f1b3c443761270adaf9877ed2a6358d830f0
|
[
"MIT"
] | null | null | null |
naturalnets/tools/episode_runner_autoencoder.py
|
bjuergens/NaturalNets
|
fd67f1b3c443761270adaf9877ed2a6358d830f0
|
[
"MIT"
] | 2
|
2021-04-13T11:47:01.000Z
|
2021-04-30T11:44:46.000Z
|
naturalnets/tools/episode_runner_autoencoder.py
|
bjuergens/NaturalNets
|
fd67f1b3c443761270adaf9877ed2a6358d830f0
|
[
"MIT"
] | 1
|
2021-11-03T09:36:40.000Z
|
2021-11-03T09:36:40.000Z
|
import copy
import multiprocessing as mp
import os
import time
import gym
import numpy as np
import torch
from gym.spaces import flatdim
from procgen import ProcgenGym3Env
from autoencoder.conv_unpool import ConvUnpoolAutoencoder
# print("Setting number of Torch threads and interop threads to 1.")
# torch.set_num_threads(1)
# torch.set_num_interop_threads(1)
class EpisodeRunnerAutoEncoder:
def __init__(self, environment: dict, brain_class, brain_configuration: dict, use_gpu: bool = True):
if use_gpu and torch.cuda.is_available():
self.device = torch.device('cuda')
self.map_location = "cuda:0"
else:
if use_gpu:
print("Warning: use_gpu set to True but CUDA device not found, continuing with CPU")
self.device = torch.device('cpu')
self.map_location = self.device
self.autoencoder = ConvUnpoolAutoencoder()
self.autoencoder.load_state_dict(torch.load("autoencoder/Conv_Unpool.pt", self.map_location))
self.autoencoder.to(self.device)
self.autoencoder.eval()
self.env_name = environment["name"]
self.distribution_mode = environment["distribution_mode"]
env = gym.make(self.env_name, distribution_mode=self.distribution_mode)
self.input_size = 32 * 6 * 6
self.output_size = flatdim(env.action_space)
self.brain_class = brain_class
self.brain_configuration = brain_configuration
self.brain_state = brain_class.generate_brain_state(input_size=self.input_size,
output_size=self.output_size,
configuration=brain_configuration)
def preprocess_ob(self, ob):
return np.transpose(ob, (0, 3, 1, 2)) / 255.0
def transform_ob(self, ob: np.ndarray) -> np.ndarray:
ob = self.preprocess_ob(ob)
with torch.no_grad():
ob = torch.from_numpy(ob).float().to(self.device)
ob = self.autoencoder.encode(ob)
# Move back to memory first, this is required when converting Tensor that is on CUDA device
ob_cpu = ob.cpu().clone().numpy()
del ob
torch.cuda.empty_cache()
return ob_cpu
def get_individual_size(self):
return self.brain_class.get_individual_size(input_size=self.input_size, output_size=self.output_size,
configuration=self.brain_configuration,
brain_state=self.brain_state)
def get_input_size(self):
return self.input_size
def get_output_size(self):
return self.output_size
def save_brain_state(self, path):
self.brain_class.save_brain_state(path, self.brain_state)
def get_free_parameter_usage(self):
return self.brain_class.get_free_parameter_usage(input_size=self.input_size, output_size=self.output_size,
configuration=self.brain_configuration,
brain_state=self.brain_state)
def get_actions(self, brain, ob):
return brain.step(ob.flatten())
def calculate_actions_trivial(self, brains, obs):
actions = [brain.step(ob.flatten()) for (brain, ob) in zip(brains, obs)]
return actions
def eval_fitness(self, evaluations, episode_steps: int = 500, break_all_episodes: bool = False):
"""
:param evaluations: List of 3-tuples (individual, env_seed, number_of_rounds)
:param episode_steps: Number of steps per episode
:param break_all_episodes: When one episode is done, break all episodes
:return:
"""
# Extract parameters, this list of lists is necessary since pool.map only accepts a single argument
# See here: http://python.omics.wiki/multiprocessing_map/multiprocessing_partial_function_multiple_arguments
# individual = evaluations[0]
env_seed = evaluations[0][1]
number_of_rounds = evaluations[0][2]
brains = []
for single_evaluation in evaluations:
brains.append(self.brain_class(input_size=self.input_size, output_size=self.output_size,
individual=single_evaluation[0], configuration=self.brain_configuration,
brain_state=self.brain_state))
fitness_total = 0
times_episodes = []
for i in range(number_of_rounds):
# num_threads=8 can be set here, don't know how it effects performance yet
env = ProcgenGym3Env(num=len(evaluations), env_name="heist", use_backgrounds=False,
distribution_mode=self.distribution_mode, num_levels=1, start_level=env_seed + i)
rew, ob, first = env.observe()
observations = ob["rgb"]
ob = self.transform_ob(observations)
# print(torch.cuda.memory_summary(device=self.device))
# print("Memory: {}".format(torch.cuda.memory_allocated(device=self.device)))
# pool = mp.get_context("spawn").Pool(processes=os.cpu_count())
fitness_current = [0] * len(evaluations)
# times_actions = []
time_s = time.time()
for i in range(episode_steps):
# actions = pool.starmap(self.get_actions, zip(brains, ob))
# time_actions_s = time.time()
actions = self.calculate_actions_trivial(brains, ob)
# times_actions.append(time.time() - time_actions_s)
actions = np.argmax(actions, axis=1)
env.act(actions)
rew, ob, first = env.observe()
if any(first) and break_all_episodes:
print("break_episodes: One or more environments are done, stopping all episodes")
break
observations = ob["rgb"]
ob = self.transform_ob(observations)
# print(torch.cuda.memory_summary(device=self.device))
# print("Memory: {}".format(torch.cuda.memory_allocated(device=self.device)))
# if i > 10:
# break
fitness_current += rew
print("Episodes with VecEnv finished")
# print("Times actions Mean {}".format(np.mean(times_actions)))
# print("Times actions Std {}".format(np.std(times_actions)))
# print("Times actions Max {}".format(np.max(times_actions)))
# print("Times actions Min {}".format(np.min(times_actions)))
times_episodes.append(time.time() - time_s)
# break
fitness_total += fitness_current
return fitness_total / number_of_rounds, times_episodes
def _validate_genome(self, validation_triple, episode_steps: int = 500, break_all_episodes: bool = False):
validation_genome = validation_triple[0]
validation_seed = validation_triple[1]
brain = self.brain_class(input_size=self.input_size, output_size=self.output_size,
individual=validation_genome, configuration=self.brain_configuration,
brain_state=self.brain_state)
env = gym.make(self.env_name,
num_levels=1,
start_level=validation_seed,
distribution_mode=self.distribution_mode)
ob = env.reset()
ob = np.expand_dims(ob, axis=0)
ob = self.transform_ob(ob)
fitness = 0
done = False
time_s = time.time()
while not done:
action = brain.step(ob.flatten())
ob, rew, done, info = env.step(np.argmax(action))
ob = np.expand_dims(ob, axis=0)
ob = self.transform_ob(ob)
fitness += rew
return [fitness, time.time() - time_s]
def validate_fitness(self, evaluations, episode_steps: int = 500, break_all_episodes: bool = False):
    """Validate a set of genomes in parallel worker processes on the CPU.

    Each genome is evaluated in its own worker via ``_validate_genome``.
    The model device is temporarily forced onto the CPU for the duration
    of the pool (presumably because CUDA state does not survive the
    fork/pickle into workers -- TODO confirm).

    :param evaluations: List of 3-tuples (individual, env_seed, number_of_rounds)
    :param episode_steps: Number of steps per episode
    :param break_all_episodes: When one episode is done, break all episodes
    :return: Tuple ``(validation_fitnesses, validation_episode_times)``,
        one entry per element of ``evaluations``.
    """
    old_device = self.device
    old_map_location = self.map_location
    self.device = torch.device('cpu')
    self.map_location = self.device
    try:
        list_episode_steps = [episode_steps] * len(evaluations)
        list_break_all_episodes = [break_all_episodes] * len(evaluations)
        with mp.Pool() as pool:
            validation_results = pool.starmap(self._validate_genome,
                                              zip(evaluations, list_episode_steps, list_break_all_episodes))
    finally:
        # Restore the original device even if the pool or a worker raised,
        # so the caller's GPU configuration is never silently clobbered.
        self.device = old_device
        self.map_location = old_map_location
    validation_fitnesses = [result[0] for result in validation_results]
    validation_episode_times = [result[1] for result in validation_results]
    return validation_fitnesses, validation_episode_times
| 40.471616
| 116
| 0.612646
|
41758437b286411f9dd5a13cc1171a9a7be3a4a3
| 1,406
|
py
|
Python
|
python/tests/app_interscalehub.py
|
multiscale-cosim/EBRAINS-translators
|
ac48debf85e83674dad4b54aa1daf5419465f805
|
[
"BSD-3-Clause"
] | 1
|
2021-06-11T09:11:33.000Z
|
2021-06-11T09:11:33.000Z
|
python/tests/app_interscalehub.py
|
multiscale-cosim/EBRAINS-translators
|
ac48debf85e83674dad4b54aa1daf5419465f805
|
[
"BSD-3-Clause"
] | 2
|
2020-07-17T08:34:54.000Z
|
2020-07-17T08:35:31.000Z
|
python/tests/app_interscalehub.py
|
multiscale-cosim/EBRAINS-translators
|
ac48debf85e83674dad4b54aa1daf5419465f805
|
[
"BSD-3-Clause"
] | 1
|
2021-06-08T07:33:21.000Z
|
2021-06-08T07:33:21.000Z
|
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements; and to You under the Apache License, Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
#
# ------------------------------------------------------------------------------
import sys
from Interscale_hub.InterscaleHub import InterscaleHub
from placeholders.parameter import Parameter
def run_wrapper(args):
    """Drive one full InterscaleHub lifecycle: init -> start -> stop.

    :param args: Command-line string selecting the data-flow direction:
        1 --> NEST to TVB, 2 --> TVB to NEST.
    """
    parameters = Parameter()
    direction = int(args)  # NOTE: will be changed
    # 1) Init InterscaleHUB: includes param setup and buffer creation.
    #    (Steering commands received: init, start, stop.)
    interscale_hub = InterscaleHub(parameters, direction)
    # 2) Start signal: receive, pivot, transform, send.
    interscale_hub.start()
    # 3) Stop signal: disconnect and close ports.
    interscale_hub.stop()
if __name__ == '__main__':
    # CLI entry point: argv[1] selects the data-flow direction.
    exit_status = run_wrapper(sys.argv[1])
    sys.exit(exit_status)
| 31.954545
| 81
| 0.610953
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.