blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
36bf83381d9d12a2a2f73d94138ec08698d8c928 | d4f579219d0d557973e6b3d6392d887081825dc3 | /PythonNTF/T1/Naloge/logicnioperatorji.py | 0777d56b1679e3b60086c901e99582547c3a29d6 | [] | no_license | aljazvaupotic/Python-Course | 1eb841cc407105c6e14bdb49445d85484de9c6d9 | d1df7b1a357fef5fbc3cccea83fd5adec25e3edf | refs/heads/master | 2023-08-25T09:05:24.258495 | 2021-11-08T10:00:31 | 2021-11-08T10:00:31 | 186,800,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,481 | py | #______________________________________________________________________________
# Termin 1, 16.5.2019
# Logični operatorji
#______________________________________________________________________________
# Logični operator *konjunkcija* ima naslednjo resničnostno tabelo, kjer
# `F` predstavlja neresnično (`False`), `T` pa resnično (`True`) vrednost:
#
# A B | A /\ B
# -----+-------
# F F | F
# F T | F
# T F | F
# T T | T
#
# S pomočjo vgrajenega operatorja `and` enostavno sestavimo funkcijo
# `konjunkcija(a, b)`, ki sprejme logični vrednosti `a` in `b` ter vrne logično
# vrednost konjunkcije `a /\ b`:
#
# def konjunkcija(a, b):
# return a and b
# =====================================================================
# 1. podnaloga
# Logični operator *disjunkcija* ima naslednjo resničnostno tabelo:
#
# A B | A \/ B
# -----+-------
# F F | F
# F T | T
# T F | T
# T T | T
#
# Sestavite funkcijo `disjunkcija(a, b)`, ki sprejme logični vrednosti
# `a` in `b` ter vrne logično vrednost disjunkcije `a \/ b`. Pri tem si
# pomagajte z vgrajenim operatorjem `or`.
# =============================================================================
# =====================================================================
# 2. podnaloga
# Logični operator *negacija* ima naslednjo resničnostno tabelo:
#
# A | ~A
# --+----
# F | T
# T | F
#
# Sestavite funkcijo `negacija(a)`, ki vrne logično vrednost disjunkcije `~a`.
# =============================================================================
# =====================================================================
# 3. podnaloga
# Logični operator *implikacija* ima naslednjo resničnostno tabelo:
#
# A B | A => B
# -----+-------
# F F | T
# F T | T
# T F | F
# T T | T
#
# Sestavite funkcijo `implikacija(a, b)`, ki vrne logično vrednost
# implikacije `a => b`.
# =============================================================================
# =====================================================================
# 4. podnaloga
# Logični operator *ekvivalenca* ima naslednjo resničnostno tabelo:
#
# A B | A <=> B
# -----+--------
# F F | T
# F T | F
# T F | F
# T T | T
#
# Sestavite funkcijo `ekvivalenca(a, b)`, ki vrne logično vrednost ekvivalence
# `a <=> b`.
#
# Namig: Pomagajte si lahko s funkcijo `implikacija`.
# =============================================================================
# =====================================================================
# 5. podnaloga
# Logični operator *ekskluzivni ali* (*exclusive or* ali XOR) ima naslednjo
# resničnostno tabelo:
#
# A B | A XOR B
# -----+--------
# F F | F
# F T | T
# T F | T
# T T | F
#
# Sestavite funkcijo `xor(a, b)`, ki vrne logično vrednost `a XOR b`.
# =============================================================================
# =====================================================================
# 6. podnaloga
# Logični operator *NAND* (*not and*) ima naslednjo
# resničnostno tabelo:
#
# A B | A NAND B
# -----+---------
# F F | T
# F T | T
# T F | T
# T T | F
#
# Sestavite funkcijo `nand(a, b)`, ki vrne logično vrednost `a NAND b`.
# =============================================================================
| [
"noreply@github.com"
] | aljazvaupotic.noreply@github.com |
b2cff122cc6e2e3b7f77f15c0931ccbb7b0bffc9 | 3a4fbde06794da1ec4c778055dcc5586eec4b7d2 | /code-samples/coursera-17.py | 6b9f38101bcabf8e7262bb9c084391cdcce921fd | [] | no_license | raychorn/svn_python-django-projects | 27b3f367303d6254af55c645ea003276a5807798 | df0d90c72d482b8a1e1b87e484d7ad991248ecc8 | refs/heads/main | 2022-12-30T20:36:25.884400 | 2020-10-15T21:52:32 | 2020-10-15T21:52:32 | 304,455,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | import io
def create_python_script(filename):
    """Write a one-line starter comment into *filename* and return the file size in bytes."""
    header = "# Start of a new Python program"
    with open(filename, "w") as out_file:
        out_file.write(header)
    # Reopen and seek to the end; seek() returns the new offset, i.e. the size.
    with open(filename, "r") as in_file:
        size = in_file.seek(0, io.SEEK_END)
    return size
# Demo: create program.py in the working directory and print its size in bytes.
print(create_python_script("program.py"))
| [
"raychorn@gmail.com"
] | raychorn@gmail.com |
2311c794062db12ee14f68625930ee7ec4fc5dd9 | 34a9c26849b3d82318c5d50df1474776e96afc58 | /scheduler/learning_rate/cosine_lr.py | 6de33f7ea0771db16fc3b300dee812194a968e42 | [
"MIT"
] | permissive | vcowwy/CvT_paddle | 483ef210e9864b254f45e556571c686409512afe | de8c28fbbc83e2c6c2479d44971020b15e7b12ec | refs/heads/master | 2023-08-31T08:49:19.237186 | 2021-11-02T09:13:43 | 2021-11-02T09:13:43 | 423,333,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | import logging
import math
import numpy as np
import paddle
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class CosineLRScheduler(Scheduler):
    """Cosine learning-rate decay with linear warmup and (optional) restarts.

    Within a cycle each parameter group's lr follows half a cosine wave from
    its base value down to ``lr_min``.  ``t_mul`` scales the length of every
    successive cycle and ``decay_rate`` scales its peak.  A linear warmup from
    ``warmup_lr_init`` runs for the first ``warmup_t`` steps.  ``t_in_epochs``
    selects whether the schedule is driven per epoch or per optimizer update.
    """
    def __init__(self,
                 optimizer: paddle.optimizer.Optimizer,
                 t_initial: int,
                 t_mul: float = 1.0,
                 lr_min: float = 0.0,
                 decay_rate: float = 1.0,
                 warmup_t=0,
                 warmup_lr_init=0,
                 warmup_prefix=False,
                 cycle_limit=0,
                 t_in_epochs=True,
                 noise_range_t=None,
                 noise_pct=0.67,
                 noise_std=1.0,
                 noise_seed=42,
                 initialize=True) -> None:
        """Build the schedule.

        Args:
            optimizer: Optimizer whose ``lr`` param-group field is driven.
            t_initial: Length (epochs or updates) of the first cosine cycle.
            t_mul: Multiplier applied to the length of each new cycle.
            lr_min: Floor value the cosine decays to within a cycle.
            decay_rate: Per-cycle multiplicative decay of the peak lr.
            warmup_t: Number of initial linear-warmup steps (0 disables).
            warmup_lr_init: Starting lr of the warmup ramp.
            warmup_prefix: If True, warmup steps are excluded from the cosine
                timeline (t is shifted back by ``warmup_t``).
            cycle_limit: Cycle count after which lr is held at ``lr_min``
                (0 means unlimited restarts).
            t_in_epochs: Drive the schedule by epochs (True) or by updates.
            noise_range_t / noise_pct / noise_std / noise_seed / initialize:
                Forwarded unchanged to the ``Scheduler`` base class.
        """
        super().__init__(optimizer,
                         param_group_field='lr',
                         noise_range_t=noise_range_t,
                         noise_pct=noise_pct,
                         noise_std=noise_std,
                         noise_seed=noise_seed,
                         initialize=initialize)
        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and t_mul == 1 and decay_rate == 1:
            _logger.warning('Cosine annealing scheduler will have no effect on the learning rate since t_initial = t_mul = eta_mul = 1.')
        self.t_initial = t_initial
        self.t_mul = t_mul
        self.lr_min = lr_min
        self.decay_rate = decay_rate
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.t_in_epochs = t_in_epochs
        if self.warmup_t:
            # Per-group linear warmup increment; self.base_values is presumably
            # populated by the Scheduler base class -- TODO confirm.
            self.warmup_steps = [((v - warmup_lr_init) / self.warmup_t) for v in self.base_values]
            # Start every param group at the warmup initial lr.
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
    def _get_lr(self, t):
        """Return the list of per-group learning rates at step ``t``."""
        if t < self.warmup_t:
            # Linear warmup phase.
            lrs = [(self.warmup_lr_init + t * s) for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # Exclude the warmup steps from the cosine timeline.
                t = t - self.warmup_t
            if self.t_mul != 1:
                # Geometric cycle lengths: solve for the cycle index i that
                # contains t, then its length t_i and the offset t_curr in it.
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
                t_i = self.t_mul ** i * self.t_initial
                t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
            else:
                # Constant cycle length t_initial.
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - self.t_initial * i
            # Shrink both the floor and the peaks by decay_rate each cycle.
            gamma = self.decay_rate ** i
            lr_min = self.lr_min * gamma
            lr_max_values = [(v * gamma) for v in self.base_values]
            if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
                # Half-cosine interpolation between each lr_max and lr_min.
                lrs = [(lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i))) for lr_max in lr_max_values]
            else:
                # Past the cycle limit: hold at the (undecayed) minimum.
                lrs = [self.lr_min for _ in self.base_values]
        return lrs
    def get_epoch_values(self, epoch: int):
        """Per-group lrs for ``epoch``, or None when driven by updates."""
        if self.t_in_epochs:
            return self._get_lr(epoch)
        else:
            return None
    def get_update_values(self, num_updates: int):
        """Per-group lrs at update ``num_updates``, or None when driven by epochs."""
        if not self.t_in_epochs:
            return self._get_lr(num_updates)
        else:
            return None
    def get_cycle_length(self, cycles=0):
        """Total steps spanned by ``cycles`` cycles (default: cycle_limit, at least 1)."""
        if not cycles:
            cycles = self.cycle_limit
        cycles = max(1, cycles)
        if self.t_mul == 1.0:
            return self.t_initial * cycles
        else:
            # Geometric-series sum: t_initial * (1 - t_mul**cycles) / (1 - t_mul).
            return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
| [
"1719870799@qq.com"
] | 1719870799@qq.com |
8b7099feb3ee046dd8adee97b3da106d2a3c6379 | 9644572133b4cde92745a6c2320069bce926f715 | /general_ocr/datasets/utils/parser.py | c6908ad99250785edfad0034ed46512b122f2d78 | [] | no_license | hnhoangdz/general_ocr | b79306f8078556cdc83690d1d5e19baff30dc878 | 8975731cbc7065aa1825bf857c33b90ad0140c49 | refs/heads/main | 2023-08-16T09:41:56.444851 | 2021-10-19T09:09:44 | 2021-10-19T09:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | # Copyright (c) GeneralOCR. All rights reserved.
import json
from general_ocr.datasets.builder import PARSERS
from general_ocr.utils import StringStrip
@PARSERS.register_module()
class LineStrParser:
    """Parse string of one line in annotation file to dict format.

    Each annotation line is stripped (via ``StringStrip``), split on
    ``separator``, and the sub-strings selected by ``keys_idx`` are mapped
    onto ``keys`` in the returned dict.

    Args:
        keys (list[str]): Keys in result dict.
        keys_idx (list[int]): Value index in sub-string list
            for each key above.
        separator (str): Separator to separate string to list of sub-string.
        **kwargs: Extra arguments forwarded to ``StringStrip``.
    """

    def __init__(self,
                 keys=None,
                 keys_idx=None,
                 separator=' ',
                 **kwargs):
        # None sentinels replace the previous mutable list defaults (a list
        # default is shared across all calls); behavior is unchanged.
        if keys is None:
            keys = ['filename', 'text']
        if keys_idx is None:
            keys_idx = [0, 1]
        assert isinstance(keys, list)
        assert isinstance(keys_idx, list)
        assert isinstance(separator, str)
        assert len(keys) > 0
        assert len(keys) == len(keys_idx)
        self.keys = keys
        self.keys_idx = keys_idx
        self.separator = separator
        self.strip_cls = StringStrip(**kwargs)

    def get_item(self, data_ret, index):
        """Parse line ``index`` (wrapped modulo ``len(data_ret)``) into a dict.

        Raises:
            Exception: If the split line has fewer fields than
                ``max(keys_idx)`` requires.
        """
        map_index = index % len(data_ret)
        line_str = data_ret[map_index]
        line_str = self.strip_cls(line_str)
        line_str = line_str.split(self.separator)
        if len(line_str) <= max(self.keys_idx):
            raise Exception(
                f'key index: {max(self.keys_idx)} out of range: {line_str}')
        line_info = {}
        for i, key in enumerate(self.keys):
            line_info[key] = line_str[self.keys_idx[i]]
        return line_info
@PARSERS.register_module()
class LineJsonParser:
    """Parse json-string of one line in annotation file to dict format.

    Args:
        keys (list[str]): Keys in both json-string and result dict. Must be
            non-empty, so it is effectively a required argument.
    """

    def __init__(self, keys=None):
        # None sentinel replaces the previous mutable [] default; an omitted
        # or empty ``keys`` still fails the non-empty assertion, as before.
        keys = [] if keys is None else keys
        assert isinstance(keys, list)
        assert len(keys) > 0
        self.keys = keys

    def get_item(self, data_ret, index):
        """Decode line ``index`` (wrapped modulo ``len(data_ret)``) as JSON
        and return a dict restricted to the configured keys.

        Raises:
            Exception: If any configured key is missing from the JSON object.
        """
        map_index = index % len(data_ret)
        json_str = data_ret[map_index]
        line_json_obj = json.loads(json_str)
        line_info = {}
        for key in self.keys:
            if key not in line_json_obj:
                raise Exception(f'key {key} not in line json {line_json_obj}')
            line_info[key] = line_json_obj[key]
        return line_info
| [
"towarddatascience@gmail.com"
] | towarddatascience@gmail.com |
9b48d601669864b025f220379cbbf6243784839f | 9d5d057e4077b77980093a22d39af1cdea1040a0 | /tools/parallel_UT_rule.py | 8d22fd6c2496e1c0748cdb33b1e4bd45e91ccf6e | [
"Apache-2.0"
] | permissive | heavengate/Paddle | 16b8d046e9cba818ba36394bec496180b5cd5938 | f9c801ffa6ac0a8a1a271c09b915d8603aba41ff | refs/heads/develop | 2023-05-23T06:15:22.855477 | 2023-02-08T08:31:23 | 2023-02-08T08:31:23 | 150,394,550 | 1 | 3 | Apache-2.0 | 2022-04-01T13:19:14 | 2018-09-26T08:33:49 | Python | UTF-8 | Python | false | false | 102,498 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
# mem=0 : It run 24 job each time in Single cases; 4 job each time in Multi cases; 5 job each time in exclusive cases
HIGH_PARALLEL_JOB_NEW = [
'mask_util_test',
'test_communicator_ps_gpu',
'preprocess_local_imagenet',
'test_nearest_interp_v2_mkldnn_op',
'op_call_stack_test',
'test_fleet_amp_meta_optimizer',
'test_mkldnn_scale_matmul_fuse_pass',
'bfloat16_gpu_test',
'test_fc_gru_fuse_pass_cc',
'device_worker_test',
'test_custom_conj',
'infer_io_utils_tester',
'test_transpose_bf16_mkldnn_op',
'test_container',
'cpu_helper_test',
'test_fake_init_op',
'test_concat_int8_mkldnn_op',
'test_lookup_table_dequant_op',
'test_broadcast_shape',
'test_program_to_string',
'test_generate_mask_labels_op',
'test_eager_deletion_dynamic_rnn_base',
'test_global_var_getter_setter',
'test_ifelse_basic',
'test_get_set_flags',
'dim_test',
'test_py_reader_return_list',
'test_fleet_meta_optimizer_base',
'test_py_reader_error_msg',
'scope_test',
'buffered_allocator_test',
'test_scaled_dot_product_attention',
'prune_test',
'test_chunk_eval_op',
'test_static_analysis',
'test_fleet_lars_meta_optimizer',
'test_while_op',
'test_runtime_and_compiletime_exception',
'test_precision_recall_op',
'test_get_inputs_outputs_in_block',
'test_lite_engine_op',
'test_repeated_fc_relu_fuse_pass_cc',
'test_mkldnn_matmul_op_output_fuse_pass',
'cudnn_helper_test',
'test_check_abi',
'data_type_test',
'test_recurrent_op',
'test_asp_utils',
'test_paddle_inference_api',
'test_reference_count_pass_last_lived_ops',
'test_op_support_gpu',
'test_conditional_block',
'test_fleet_rolemaker_init',
'test_pybind_interface',
'test_io_save_load',
'test_fusion_lstm_int8_mkldnn_op',
'test_benchmark',
'test_protobuf',
'test_tdm_sampler_op',
'test_teacher_student_sigmoid_loss_op',
'test_transpose_int8_mkldnn_op',
'test_transpose_mkldnn_op',
'test_fleet_rolemaker_4',
'to_string_test',
'test_c_comm_init_all_op',
'test_bilinear_interp_mkldnn_op',
'test_split_bf16_mkldnn_op',
'test_cpu_quantize_squash_pass',
'test_batch_norm_act_fuse_pass',
'test_mkldnn_op_inplace',
'test_seqpool_concat_fuse_pass',
'test_analyzer_save_model',
'test_exception',
'test_similarity_focus_op',
'test_conv_batch_norm_mkldnn_fuse_pass',
'test_sequence_last_step',
'test_mkldnn_cpu_bfloat16_pass',
'op_debug_string_test',
'test_quant2_int8_mkldnn_pass',
'test_layer',
'test_sampling_id_op',
'test_nce',
'graph_helper_test',
'test_static_shape_inferrence_for_shape_tensor',
'test_layer_norm_mkldnn_op',
'test_fleet_launch_async',
'test_multi_gru_fuse_pass',
'test_hash_op',
'test_rpn_target_assign_op',
'test_concat_bf16_mkldnn_op',
'test_fc_lstm_fuse_pass_cc',
'test_version',
'gather_test',
'test_reshape_bf16_op',
'test_compat',
'test_data_feeder',
'cpu_vec_test',
'test_distributed_strategy',
'test_hsigmoid_op',
'test_hooks',
'test_fleet_base_2',
'op_kernel_type_test',
'test_layer_norm_bf16_mkldnn_op',
'test_fleetrun',
'cpu_info_test',
'brpc_utils_test',
'test_fusion_seqexpand_concat_fc_op',
'test_dataset_voc',
'test_analyzer_capi_exp_int',
'test_post_training_quantization_resnet50',
'cuda_helper_test',
'test_conv_concat_relu_mkldnn_fuse_pass',
'test_bf16_utils',
'test_sum_bf16_mkldnn_op',
'test_unsqueeze2_eltwise_fuse_pass_cc',
'dense_table_test',
'test_collective_optimizer',
'test_origin_info',
'test_dgc_optimizer',
'test_avoid_twice_initialization',
'test_reduce_bf16_mkldnn_op',
'test_mkldnn_conv_bias_fuse_pass',
'cow_ptr_tests',
'eigen_test',
'reader_blocking_queue_test',
'test_fusion_gru_op',
'operator_test',
'test_fusion_gru_int8_mkldnn_op',
'test_cpu_bfloat16_pass',
'test_multiprocess_dataloader_iterable_dataset_split',
'test_scope',
'test_analyzer_bfloat16_mobilenetv2',
'test_fleet_rolemaker_2',
'float16_test',
'test_dpsgd_op',
'test_conv_elementwise_add_mkldnn_fuse_pass',
'test_crypto',
'test_sgd_op_bf16',
'test_analyzer_capi_exp_ner',
'lite_subgraph_pass_tester',
'test_tf32_cudnn',
'threadpool_test',
'test_cpu_quantize_pass',
'test_analyzer_capi_exp_pd_tensor',
'tuple_test',
'test_analyzer_lac',
'test_prune',
'test_bilinear_interp_v2_mkldnn_op',
'test_lod_tensor_array',
'test_logging_utils',
'test_fleet_nocvm_1',
'stringprintf_test',
'test_nearest_interp_mkldnn_op',
'test_matmul_mkldnn_op',
'test_debugger',
'test_custom_attrs_jit',
'test_lrn_mkldnn_op',
'test_set_bool_attr',
'version_test',
'test_broadcast_to_op',
'test_squared_mat_sub_fuse_pass',
'test_fleet_ascend_utils',
'test_fused_emb_seq_pool_op',
'test_imperative_data_loader_exit_func',
'test_feed_fetch_method',
'test_protobuf_descs',
'test_fleet_unitaccessor',
'test_sequence_scatter_op',
'test_skip_layernorm_fuse_pass',
'test_fs_interface',
'test_gast_with_compatibility',
'test_repeated_fc_relu_fuse_pass',
'timer_test',
'var_type_traits_test',
'test_py_reader_sample_generator',
'test_conv2d_transpose_mkldnn_op',
'test_fleet_runtime',
'test_rnn_cudnn_params_packing',
'test_mkldnn_placement_pass',
'test_fc_elementwise_layernorm_fuse_pass_cc',
'program_desc_test',
'test_simplify_with_basic_ops_pass',
'test_dygraph_mode_of_unittest',
'gather_op_test',
'test_trainer_desc',
'test_matmul_bf16_mkldnn_op',
'test_analyzer_seq_conv1',
'test_fused_embedding_fc_lstm_op',
'test_conv2d_transpose_bf16_mkldnn_op',
'check_reduce_rank_test',
'test_progressbar',
'test_seed_op',
'test_fc_bf16_mkldnn_op',
'test_sequence_first_step',
'test_fusion_lstm_mkldnn_op',
'test_elementwise_add_bf16_mkldnn_op',
'test_static_save_load_bf16',
'test_elementwise_mul_bf16_mkldnn_op',
'test_distributions',
'operator_exception_test',
'dropout_op_test',
'test_gpu_package_without_gpu_device',
'test_detection_map_op',
'test_zeros_op',
'test_launch_coverage',
'test_mkldnn_conv_activation_fuse_pass',
'test_inference_model_io',
'test_fusion_repeated_fc_relu_op',
'cudnn_desc_test',
'test_beam_search_op',
'test_var_conv_2d',
'test_listen_and_serv_op',
'test_dequantize_mkldnn_op',
'test_analyzer_capi_exp_pd_threads',
'test_selected_rows',
'test_fleet_sharding_meta_optimizer',
'test_inference_api',
'test_data_generator',
'test_deprecated_memory_optimize_interfaces',
'test_ir_skip_layernorm_pass',
'broadcast_op_test',
'test_multihead_matmul_fuse_pass',
'test_lookup_table_bf16_op',
'test_positive_negative_pair_op',
'init_test',
'test_tensorrt',
'test_check_error',
'test_program',
'mmap_allocator_test',
'test_reshape_transpose_matmul_mkldnn_fuse_pass',
'test_communicator_async',
'test_downpoursgd',
'variable_test',
'test_quantization_mkldnn_pass',
'test_quantize_mkldnn_op',
'test_create_op_doc_string',
'test_analyzer_lexical_gru_bfloat16',
'test_imperative_data_loader_process',
'assign_op_test',
'test_analyzer_capi_exp_xpu',
'test_conv_bn_fuse_pass_cc',
'test_recommender_system',
'test_ones_op',
'test_fc_mkldnn_op',
'test_load_op_xpu',
'test_pool2d_int8_mkldnn_op',
'test_mul_int8_mkldnn_op',
'test_scale_matmul_fuse_pass',
'test_fleet_graph_executor',
'decorator_test',
'test_collective_base',
'test_multi_gru_mkldnn_op',
'test_eager_deletion_conditional_block',
'op_proto_maker_test',
'test_mkldnn_op_nhwc',
'test_fc_act_mkldnn_fuse_pass',
'test_fleet_base_3',
'test_query_op',
'test_fleet_base_4',
'save_load_op_test',
'test_batch_sampler',
'test_image_classification_layer',
'test_fusion_gru_mkldnn_op',
'graph_test',
'test_ir_graph',
'test_hapi_hub_model',
'test_requantize_mkldnn_op',
'test_depthwise_conv_mkldnn_pass',
'test_fleet_metric',
'test_fc_fuse_pass_cc',
'test_fleet_private_function',
'test_fleet',
'test_executor_check_feed',
'test_py_reader_lod_level_share',
'nccl_context_test',
'inlined_vector_test',
'test_generate_proposal_labels_op',
'test_analyzer_capi_exp_pd_config',
'test_locality_aware_nms_op',
'test_imperative_decorator',
'test_npair_loss_op',
'test_ps_dispatcher',
'test_analyzer_rnn2',
'test_multi_gru_seq_fuse_pass',
'test_filter_by_instag_op',
'test_switch',
'test_matmul_transpose_reshape_fuse_pass',
'test_mkldnn_caching',
'test_fetch_var',
'op_compatible_info_test',
'complex_test',
'test_fleet_static_mp_layers',
'test_aligned_allocator',
'test_analyzer_transformer_fuse',
'test_sequence_topk_avg_pooling',
'test_analyzer_lexical_gru',
'test_broadcast_error',
'test_context_manager',
'test_registry',
'brpc_service_sparse_sgd_test',
'test_operator',
'test_mkldnn_conv_concat_relu_mkldnn_fuse_pass',
'test_collective_api_base',
'test_entry_attr',
'test_get_places_op',
'test_softmax_mkldnn_op',
'test_dynrnn_static_input',
'auto_growth_best_fit_allocator_test',
'test_batch_norm_mkldnn_op',
'test_bpr_loss_op',
'no_need_buffer_vars_inference_test',
'test_fleet_cc',
'test_download',
'test_fleet_recompute_meta_optimizer',
'test_seqpool_cvm_concat_fuse_pass',
'test_common_infer_shape_functions',
'test_fusion_seqpool_concat_op',
'test_op_compat_sensible_pass',
'test_fs',
'test_fc_rnn_mkldnn_fuse_pass',
'split_test',
'test_fusion_group_pass',
'test_fusion_lstm_bf16_mkldnn_op',
'test_executor_feed_non_tensor',
'test_var_info',
'test_reducescatter',
'test_fleet_ps',
'test_check_import_scipy',
'test_load_vars_shape_check',
'test_nn_functional_embedding_static',
'test_fleet_rolemaker_new',
'test_imperative_base',
'dist_multi_trainer_test',
'test_mine_hard_examples_op',
'test_post_training_quantization_lstm_model',
'aes_cipher_test',
'test_analyzer_zerocopytensor_tensor',
'rw_lock_test',
'exception_holder_test',
'enforce_test',
'test_rnn_memory_helper_op',
'ddim_test',
'test_eager_deletion_padding_rnn',
'test_is_test_pass',
'test_fusion_seqconv_eltadd_relu_op',
'test_fleet_localsgd_meta_optimizer',
'node_test',
'test_analyzer_text_classification',
'test_seq_concat_fc_fuse_pass',
'test_imperative_numpy_bridge',
'test_adaptive_pool2d_convert_global_pass',
'test_lookup_table_v2_bf16_op',
'test_operator_desc',
'test_elementwise_mul_mkldnn_op',
'test_fetch_handler',
'test_cpu_bfloat16_placement_pass',
'test_match_matrix_tensor_op',
'test_fleet_run_random_port',
'test_mkldnn_matmul_transpose_reshape_fuse_pass',
'test_fleet_lamb_meta_optimizer',
'test_op_version',
'fused_broadcast_op_test',
'test_tdm_child_op',
'test_imperative_group',
'test_analyzer_capi_exp',
'test_post_training_quantization_mobilenetv1',
'test_load_op',
'test_executor_and_use_program_cache',
'op_registry_test',
'test_create_global_var',
'test_dispatch_jit',
'table_test',
'test_full_op',
'test_recv_save_op',
'test_fusion_lstm_op',
'test_eager_deletion_recurrent_op',
'brpc_service_dense_sgd_test',
'op_tester',
'test_eager_deletion_mnist',
'test_infer_shape',
'test_fleet_rolemaker',
'test_entry_attr2',
'test_monitor',
'test_require_version',
'test_function_spec',
'test_image',
'lod_tensor_test',
'place_test',
'test_fleet_launch_cloud',
'test_conv2d_bf16_mkldnn_op',
'test_parallel_executor_run_load_infer_program',
'scatter_test',
'graph_to_program_pass_test',
'test_lod_tensor_array_ops',
'test_embedding_eltwise_layernorm_fuse_pass',
'complex_gpu_test',
'save_load_combine_op_test',
'test_logger',
'test_analyzer',
'test_utils',
'barrier_table_test',
'test_memory_usage',
'test_sysconfig',
'reader_test',
'test_conv_bias_mkldnn_fuse_pass_cc',
'math_function_test',
'beam_search_decode_op_test',
'save_quant2_model_resnet50',
'bfloat16_test',
'test_scale_bf16_mkldnn_op',
'test_fp16_utils',
'test_cpu_quantize_placement_pass',
'test_slice_var',
'test_analyzer_ocr',
'test_flags_use_mkldnn',
'pass_test',
'test_trainable',
'test_sync_batch_norm_pass',
'lodtensor_printer_test',
'test_calc_gradient',
'test_create_parameter',
'test_infer_no_need_buffer_slots',
'test_run_fluid_by_module_or_command_line',
'test_boxps',
'test_initializer',
'test_fusion_squared_mat_sub_op',
'test_desc_clone',
'test_analyzer_mobilenet_depthwise_conv',
'test_analyzer_pyramid_dnn',
'test_analyzer_detect_functional_mkldnn',
'errors_test',
'test_name_scope',
'var_type_inference_test',
'test_const_value',
'test_spawn_and_init_parallel_env',
'test_fleet_gradient_scale',
'unroll_array_ops_test',
'op_version_registry_test',
'test_cudnn_placement_pass',
'cipher_utils_test',
'test_program_code',
'test_save_model_without_var',
'program_utils_test',
'test_fleet_distributed_strategy',
'test_hybrid_parallel_topology',
'test_ascend_trigger',
'test_fleet_rolemaker_3',
'test_conv_activation_mkldnn_fuse_pass',
'test_fusion_gru_bf16_mkldnn_op',
'test_model_cast_to_bf16',
'test_quantize_transpiler',
'conditional_block_op_test',
'test_fleet_gradient_merge_meta_optimizer',
'test_graph_pattern_detector',
'test_fleet_fp16_allreduce_meta_optimizer',
'test_unique_name',
'test_multi_out_jit',
'test_attention_lstm_op',
'test_mkldnn_quantizer_config',
'data_layout_transform_test',
'test_conv2d_int8_mkldnn_op',
'test_fusion_seqpool_cvm_concat_op',
'save_quant2_model_gru',
'test_generator',
'test_sum_mkldnn_op',
'test_fleet_util',
'test_fleet_dgc_meta_optimizer',
'selected_rows_functor_test',
'test_default_scope_funcs',
'test_communicator_sync',
'test_communicator_half_async',
'test_dynrnn_gradient_check',
'test_pool2d_bf16_mkldnn_op',
'test_table_printer',
'test_framework_debug_str',
'test_dist_fleet_ps2',
'test_collective_scatter_api',
'test_dist_sparse_tensor_load_ftrl',
'test_dist_mnist_dgc_nccl',
'test_dist_oneps',
'test_dist_tree_index',
'test_dist_fleet_ps',
'test_dist_fleet_a_sync_optimizer_sync',
'test_dist_fleet_decay',
'test_dist_fleet_simnet',
'test_dist_sparse_load_ps1',
'test_dist_mnist_fleet_save',
'test_dist_fleet_ps7',
'test_dist_mnist_fleetapi',
'test_dist_sparse_tensor_load_adam',
'test_dist_fleet_ps_gpu_ctr',
'test_dist_mnist_ring_allreduce',
'test_dist_op',
'test_new_group_api',
'test_dist_fleet_heter_base',
'test_collective_split_col_linear',
'test_parallel_executor_mnist',
'test_dist_fleet_ctr2',
'test_dist_fleet_heter_program',
'test_dist_fleet_ctr',
'test_collective_allreduce_api',
'test_dataloader_unkeep_order',
'test_dataloader_keep_order',
'test_dist_se_resnext_sync',
'test_dist_fleet_ps6',
'test_dist_fleet_a_sync_optimizer_auto_async',
'test_dist_fleet_a_sync_optimizer_auto',
'test_dist_fleet_ps9',
'test_dist_fleet_raw_program_optimizer_fuse_allreduce',
'test_dist_fleet_ps11',
'test_dist_fleet_ps8',
'test_dist_mnist_fp16_allreduce',
'test_dist_fleet_ps12',
'test_collective_split_row_linear',
'test_collective_reduce_api',
'test_multiprocess_dataloader_exception',
'test_collective_allgather_api',
'test_dist_fleet_ps10',
'test_dist_sparse_tensor_load_rmsprop',
'test_collective_split_embedding_none_divisible',
'test_parallel_dygraph_dataparallel',
'test_fleet_graph_execution_meta_optimizer',
'test_dist_fleet_ps3',
'test_dist_mnist_pg',
'test_pipeline_parallel',
'test_dist_fleet_ps5',
'test_dist_fleet_sparse_embedding_ctr',
'test_collective_broadcast_api',
'retry_allocator_test',
'test_dist_mnist_backward_deps',
'test_dist_mnist_multi_comm',
'test_dist_allreduce_op',
'test_parallel_dygraph_sparse_embedding',
'test_dist_se_resnext_dgc',
'test_dist_sharding_save',
'test_dist_fleet_a_sync_optimizer_async',
'test_gen_nccl_id_op',
'test_auto_checkpoint',
'test_collective_split_embedding',
'test_parallel_dygraph_sparse_embedding_over_height',
'test_dist_sparse_tensor_load_momentum',
'test_dist_fleet_ps4',
'test_collective_alltoall_api',
'test_dist_fleet_raw_program_optimizer',
'test_parallel_dygraph_mp_layers',
'test_dist_fleet_geo',
'test_fleet_raw_program_meta_optimizer',
'test_sync_batch_norm_op',
'test_dist_mnist_batch_merge',
'test_fleet_launch_ps',
'test_dist_sparse_tensor_load_sgd',
'test_dist_fleet_a_sync_optimizer_auto_geo',
'test_dist_lookup_sparse_table_fuse_ops',
'test_dist_fleet_a_sync_optimizer_geo',
'test_multiprocess_dataloader_iterable_dataset_static',
'test_dist_fleet_grad_clip',
'test_fleet_pipeline_meta_optimizer_with_recompute',
'test_dist_sparse_load_ps0',
'test_collective_barrier_api',
'test_fleet_pipeline_meta_optimizer',
'test_parallel_dygraph_mnist',
'test_dist_sparse_tensor_load_adagrad',
'test_new_group',
'test_imperative_signal_handler',
'test_parallel_dygraph_sharding_parallel',
'test_dist_hapi_model',
'test_dist_mnist_gradient_merge',
'test_ir_pass_pipeline',
'test_rnn_dp',
'test_parallel_dygraph_no_sync',
'test_parallel_dygraph_no_sync_gradient_check',
'test_hybrid_parallel_inference_helper',
'test_parallel_class_center_sample',
'test_auto_parallel_data_unshard',
'small_vector_test',
'scope_guard_test',
'cinn_cache_key_test',
'test_generate_pass_cc',
'cinn_compiled_object_test',
'cinn_runner_test',
'test_build_cinn_pass',
'cost_model_test',
'device_event_test',
'test_fused_layernorm_residual_dropout_bias',
'test_mkldnn_quantizer',
'test_fused_residual_dropout_bias',
'paddle_infer_api_errors_test',
'test_fused_dropout_act_bias',
'test_analyzer_lexical_gru_int8',
'workqueue_test',
'feed_forward_test',
'test_analyzer_lexical_gru_int8_multi_gru',
'test_pow2_warmup_op',
'test_dlpack',
'test_ops_roi_align',
'test_auto_parallel_parallelizer',
'test_ops_roi_pool',
'test_backward_infer_var_data_type_shape',
'test_auto_parallel_completion',
'test_cuda_device_count',
'test_cuda_device_name_capability',
'test_auto_parallel_completion_gpt',
'test_class_center_sample_op',
'test_dataset_consistency_inspection',
'test_cuda_empty_cache',
'test_cuda_graph',
'test_cuda_graph_static_mode',
'test_disable_signal_handler',
'test_eig_op',
'test_eigh_op',
'test_determinant_op',
'test_executor_check_fetch_list',
'test_functional_conv1d_transpose',
'test_functional_conv1d',
'test_get_device_properties',
'test_fill_diagonal_tensor_op',
'test_linalg_cond',
'test_memory_analysis',
'test_matrix_rank_op',
'test_merged_momentum_op',
'test_parallel_executor_run_cinn',
'test_parallel_dygraph_dataparallel_cpuonly',
'test_eigvals_op',
'test_sparse_attention_op',
'test_auto_parallel_partitioner',
'test_signal',
'test_auto_parallel_reshard',
'test_auto_parallel_reshard_mppp',
'test_auto_parallel_partitioner_gpt',
'test_fleet_hybrid_meta_optimizer',
'test_auto_parallel_reshard_serial',
'test_auto_parallel_reshard_dpmppp',
'test_clip_mkldnn_op',
'test_elementwise_sub_mkldnn_op',
'test_flatten_mkldnn_op',
'test_slice_mkldnn_op',
'test_ir_generate_pass',
'test_ir_subgraph_python_interface',
'test_trt_convert_concat',
'test_trt_convert_gather_nd',
'test_trt_convert_multihead_matmul',
'test_trt_convert_reduce_sum',
'save_quant2_model_lstm',
'test_trt_convert_slice',
'test_quant2_int8_lstm_mkldnn',
]
# Tests that report mem=0 but consistently time out or fail under the highest
# parallelism; scheduled 15 jobs at a time in the "Single" (one-GPU) cases.
# NOTE(review): some entries here (e.g. 'test_error_clip',
# 'test_directory_migration', 'test_dataset_uci_housing') also appear in
# CPU_PARALLEL_JOB below — confirm whether cross-list duplicates are intended.
SECONDARY_HIGH_PARALLEL_JOB_NEW = [
    'test_dataset_conll05',
    'test_conv3d_mkldnn_op',
    'test_matrix_nms_op',
    'test_data',
    'test_analyzer_paddletensor_tensor',
    'test_linear_chain_crf_op',
    'test_analyzer_multi_model_prediction',
    'test_default_dtype',
    'device_context_test',
    'test_analyzer_googlenet',
    'jit_kernel_test',
    'profiler_test',
    'preprocess_local_pascalvoc',
    'test_conv2d_transpose_layer',
    'test_analyzer_int8_googlenet',
    'test_analyzer_seq_pool1_compare_determine',
    'save_quant2_model_ernie',
    'test_parallel_executor_seresnext_with_fuse_all_reduce_cpu',
    'test_dataset_uci_housing',
    'test_parallel_executor_seresnext_base_cpu',
    'test_dataset_download',
    'test_quant_int8_mobilenetv1_mkldnn',
    'test_crf_decoding_op',
    'test_conv3d_transpose_layer',
    'test_quant2_int8_mobilenetv1_mkldnn',
    'test_softmax_bf16_mkldnn_op',
    'test_quant2_int8_resnet50_range_mkldnn',
    'test_pool2d_mkldnn_op',
    'test_flags_mkldnn_ops_on_off',
    'test_c_comm_init_op',
    'test_uniform_random_bf16_op',
    'test_custom_concat',
    'test_weight_quantization_mobilenetv1',
    'test_concat_mkldnn_op',
    'test_gaussian_random_mkldnn_op',
    'test_parallel_executor_seresnext_with_reduce_cpu',
    'test_dataset_imikolov',
    'test_analyzer_rnn1',
    'test_conv2d_mkldnn_op',
    'test_conv3d_layer',
    'test_error_clip',
    'selected_rows_test',
    'test_static_save_load_large',
    'test_bipartite_match_op',
    'test_conv2d_layer',
    'test_analyzer_seq_pool1_fuse_statis',
    'test_split_plugin',
    'test_analyzer_small_dam',
    'test_analyzer_capi_exp_gpu',
    'test_quant2_int8_resnet50_channelwise_mkldnn',
    'test_analyzer_bert',
    'test_directory_migration',
    'test_elementwise_add_mkldnn_op',
    'test_quant_int8_googlenet_mkldnn',
    'test_callback_early_stop',
]
# Tests that report mem=0 but consistently time out or fail under higher
# parallelism; scheduled 12 jobs at a time in the "Single" (one-GPU) cases.
THIRD_HIGH_PARALLEL_JOB_NEW = [
    'test_api_impl',
    'test_analyzer_seq_pool1_fuse_compare_zero_copy',
    'test_analyzer_seq_pool1_profile',
    'test_analyzer_mobilenet_transpose',
    'test_analyzer_resnet50',
    'test_analyzer_int8_resnet50',
    'test_analyzer_int8_mobilenetv2',
    'test_analyzer_bfloat16_resnet50',
    'test_analyzer_bfloat16_mobilenetv1',
    'test_analyzer_int8_mobilenet_ssd',
    'test_dataset_cifar',
    'test_dataset_imdb',
    'test_dataset_movielens',
    'test_datasets',
    'test_allgather',
    'test_c_concat',
    'test_c_split',
    'test_collective_reduce',
    'test_cyclic_cifar_dataset',
    'test_dyn_rnn',
    'test_multiclass_nms_op',
    'test_communicator_geo',
    'test_quant_int8_mobilenetv2_mkldnn',
    'test_analyzer_seq_pool1',
    'test_analyzer_transformer',
    'test_analyzer_transformer_profile',
    'test_analyzer_int8_mobilenetv1',
    'test_analyzer_bfloat16_googlenet',
    'test_analyzer_quant_performance_benchmark',
    'test_dataset_wmt',
    'test_allreduce',
    'test_broadcast',
    'test_c_identity',
    'test_collective_sendrecv_api',
    'test_fleet_utils',
    'test_fused_elemwise_activation_op',
    'test_group_norm_op',
    'test_fleet_launch_nproc',
    'test_quant_int8_resnet50_mkldnn',
    'test_quant2_int8_ernie_mkldnn',
    'convert_model2dot_ernie',
]
# Tests with mem != 0 (they do occupy GPU memory): scheduled 7 jobs at a time
# in "Single" cases, 4 at a time in "Multi" cases, and 3 at a time in
# "exclusive" cases.
FOURTH_HIGH_PARALLEL_JOB_NEW = [
    'test_meshgrid_op',
    'test_word2vec',
    'test_analyzer_ner',
    'test_fetch_lod_tensor_array',
    'test_adagrad_op_v2',
    'test_conv2d_fusion_op',
    'test_hapi_amp',
    'test_metrics',
    'test_clip_by_norm_op',
    'test_lr_scheduler',
    'test_generate_proposals_op',
    'test_masked_select_op',
    'test_imperative_ocr_attention_model',
    'test_sentiment',
    'test_chunk_op',
    'test_memcpy_op',
    'test_warpctc_op',
    'test_row_conv_op',
    'test_grid_sample_function',
    'test_rnn_nets',
    'test_pad3d_op',
    'test_imperative_mnist_sorted_gradient',
    'tensor_test',
    'test_tensorrt_engine_op',
    'test_dot_op',
    'test_real_imag_op',
    'test_adam_optimizer_fp32_fp64',
    'test_reduce_op',
    'test_density_prior_box_op',
    'test_top_k_op',
    'test_grid_generator',
    'test_randn_op',
    'test_activation_mkldnn_op',
    'test_lac',
    'test_pad_op',
    'test_lstmp_op',
    'test_loop',
    'test_pylayer_op',
    'data_device_transform_test',
    'test_trt_roi_align_op',
    'test_nn_functional_hot_op',
    'test_top_k_v2_op',
    'test_crop_op',
    'test_conv_bn_fuse_pass',
    'test_beam_search_decode_op',
    'test_auc_op',
    'test_pool2d_op',
    'test_gaussian_random_op',
    'test_maximum_op',
    'test_rnn_cell_api',
    'device_code_test',
    'test_ir_inplace_pass',
    'test_cos_sim_op',
    'test_lite_tensor_utils',
    'test_fit_a_line',
    'test_mish_op',
    'test_transpose_op',
    'test_mean_iou',
    'test_conv3d_transpose_op',
    'test_jit_save_load',
    'test_unsqueeze2_op',
    'test_eager_deletion_while_op',
    'test_zeros_like_op',
    'test_c_embedding_op',
    'test_regularizer',
    'zero_copy_tensor_test',
    'test_tensor_shape',
    'test_resnet',
    'test_resnet_amp',
    'test_dygraph_weight_norm',
    'test_tracer',
    'test_list',
    'test_sequence_concat',
    'test_adaptive_avg_pool1d',
    'test_elementwise_div_op',
    'test_conv1d_transpose_layer',
    'test_adamw_op',
    'trt_fc_prelu_test',
    'test_temporal_shift_op',
    'test_naive_best_fit_gpu_memory_limit',
    'dlpack_tensor_test',
    'test_elementwise_max_op',
    'test_typing',
    'test_asp_pruning_2d_greedy',
    'test_fake_dequantize_op',
    'test_crop_tensor_op',
    'test_imperative_load_static_param',
    'test_imperative_qat_user_defined',
    'test_anchor_generator_op',
    'test_prepare_op',
    'test_conj_op',
    'test_imperative_hook_for_layer',
    'test_roi_pool_op',
    'test_strided_slice_op',
    'test_norm_all',
    'test_weight_decay',
    'test_functional_conv2d',
    'test_functional_conv3d_transpose',
    'test_imperative_layer_trainable',
    'test_imperative_data_parallel',
    'test_digamma_op',
    'test_distribution',
    'test_box_clip_op',
    'custom_tensor_test',
    'test_marker_op',
    'test_dataloader_early_reset',
    'test_gather_nd_op',
    'test_tensor_register_hook',
    'test_retain_graph',
    'test_network_with_dtype',
    'test_basic_api_transformation',
    'test_diag',
    'test_lod_array_length_op',
    'test_reinforcement_learning',
    'test_softmax_op',
    'test_fc_fuse_pass',
    'test_adaptive_max_pool2d',
    'test_inverse_op',
    'test_declarative',
    'test_imperative_double_grad',
    'test_tensor_methods',
    'test_pool1d_api',
    'system_allocator_test',
    'test_print',
    'test_tensor_type_promotion',
    'test_bce_with_logits_loss',
    'test_tensor',
    'test_cross_op',
    'concat_test',
    'test_ast_util',
    'test_proximal_adagrad_op',
    'test_pairwise_distance',
    'test_imperative_mnist',
    'test_beam_search_decoder',
    'test_build_strategy_fusion_group_pass',
    'test_dygraph_spectral_norm',
    'test_scale_mkldnn_op',
    'test_load_state_dict_from_old_format',
    'test_margin_rank_loss_op',
    'test_lookup_table_v2_op',
    'test_mix_precision_all_reduce_fuse',
    'test_spp_op',
    'test_op_converter',
    'test_mixed_vector',
    'test_roi_align_op',
    'test_pad_constant_like',
    'test_mul_op',
    'test_spectral_norm_op',
    'test_transformer',
    'test_for_enumerate',
    'test_variable_trans_func',
    'test_squared_l2_distance_op',
    'test_quantize_transpiler_v2',
    'test_im2sequence_op',
    'test_reader_reset',
    'test_one_hot_op',
    'test_adaptive_max_pool1d',
    'test_label_smooth_op',
    'test_parallel_executor_fetch_feed',
    'test_cast',
    'test_parallel_dygraph_sync_batch_norm',
    'test_collect_fpn_proposals_op',
    'test_expand_as_v2_op',
    'test_device',
    'test_code_generator',
    'test_asp_pruning_2d_best',
    'test_fleet_with_asp',
    'test_pool2d_api',
    'test_mean_op',
    'test_is_tensor',
    'test_run_program_op',
    'test_cuda_random_seed',
    'test_linear_interp_op',
    'test_fuse_all_reduce_pass',
    'tensor_util_test',
    'test_median',
    'test_nanmedian',
    'test_linear',
    'test_imperative_qat_amp',
    'test_truncated_gaussian_random_op',
    'test_lstm_cudnn_op',
    'copy_same_tensor_test',
    'test_squeeze2_op',
    'naive_best_fit_allocator_test',
    'test_model',
    'test_py_reader_combination',
    'test_prior_box_op',
    'test_matmul_v2_mkldnn_op',
    'test_sum_op',
    'test_paddle_imperative_double_grad',
    'test_norm_op',
    'test_pool3d_api',
    'test_imperative_gan',
    'test_sequence_softmax_op',
    'test_rand_op',
    'test_expand_v2_op',
    'test_word2vec_book',
    'test_histogram_op',
    'test_min_op',
    'test_mse_loss',
    'test_sign_op',
    'selected_rows_functor_gpu_test',
    'test_fleet_base',
    'test_logsumexp',
    'test_detection',
    'test_image_classification_fp16',
    'test_random_seed',
    'test_op_function_generator',
    'test_unique_with_counts',
    'test_complex_elementwise_layers',
    'test_array_read_write_op',
    'test_fusion_group_op',
    'test_imperative_layer_apply',
    'test_executor_return_tensor_not_overwriting',
    'test_optimizer_in_control_flow',
    'test_lookup_table_op',
    'test_randint_op',
    'test_randint_like',
    'test_convert_call',
    'test_sigmoid_cross_entropy_with_logits_op',
    'copy_cross_scope_test',
    'test_normalization_wrapper',
    'test_flip',
    'test_cosine_similarity_api',
    'test_cumsum_op',
    'test_range',
    'test_log_loss_op',
    'test_where_index',
    'test_tril_triu_op',
    'test_lod_reset_op',
    'test_lod_tensor',
    'test_addmm_op',
    'test_index_select_op',
    'test_index_add_op',
    'test_nvprof',
    'test_index_sample_op',
    'test_unstack_op',
    'test_increment',
    'strided_memcpy_test',
    'test_target_assign_op',
    'test_trt_dynamic_shape_transformer_prune',
    'test_box_decoder_and_assign_op',
    'test_trt_dynamic_shape',
    'test_mnist',
    'test_convert_operators',
    'test_fill_any_like_op',
    'test_fill_constant_op',
    'test_callback_reduce_lr_on_plateau',
    'test_tile_op',
    'test_logical',
    'test_deformable_conv_op',
    'test_elementwise_add_grad_grad',
    'test_simple_rnn_op',
    'test_bicubic_interp_op',
    'test_batch_norm_op_v2',
    'test_custom_relu_op_jit',
    'test_math_op_patch_var_base',
    'test_se_resnet',
    'test_device_guard',
    'test_elementwise_div_grad_grad',
    'test_minus_op',
    'test_shard_index_op',
    'test_momentum_op',
    'test_modelaverage',
    'test_compare_reduce_op',
    'test_affine_grid_op',
    'test_allclose_layer',
    'test_elementwise_pow_op',
    'test_trt_subgraph_pass',
    'test_adaptive_avg_pool2d',
    'test_functional_conv3d',
    'test_executor_and_mul',
    'test_kron_op',
    'test_cast_mkldnn_op',
    'test_imperative_auto_prune',
    'allocator_facade_frac_flags_test',
    'test_fill_zeros_like_op',
    'test_gather_tree_op',
    'test_elementwise_mul_op',
    'test_cycle_gan',
    'test_parallel_executor_transformer_auto_growth',
    'test_bitwise_op',
    'test_uniform_random_op',
    'trt_split_converter_test',
    'test_huber_loss_op',
    'test_slice',
    'test_label_smooth_functional',
    'test_conv_shift_op',
    'test_imperative_optimizer_v2',
    'test_len',
    'test_imperative_named_members',
    'test_sequence_reshape',
    'test_elementwise_min_op',
    'test_flatten2_op',
    'test_param_guard',
    'test_imperative_ptb_rnn',
    'test_batch_fc_op',
    'test_Tensor_type',
    'test_complex_getitem',
    'lod_tensor_gpu_test',
    'im2col_test',
    'test_unbind_op',
    'test_imperative_ptq',
    'test_auc_single_pred_op',
    'test_imperative_reinforcement',
    'test_tf32_cublas',
    'test_return',
    'test_py_reader_push_pop',
    'test_lstm',
    'test_dygraph_mnist_fp16',
    'test_shuffle_channel_op',
    'test_partial_concat_op',
    'test_fill_zeros_like2_op',
    'test_deformable_conv_v1_op',
    'test_complex_grad_accumulated',
    'test_sequence_mask',
    'test_fill_op',
    'test_imperative_deepcf',
    'test_multiply',
    'test_partial_program',
    'test_fetch_feed',
    'test_group',
    'test_trt_reduce_sum_op',
    'data_type_transform_test',
    'test_gru_rnn_op',
    'test_argsort_op',
    'test_batch_norm_op',
    'test_inplace',
    'test_deprecated_decorator',
    'test_complex_cast',
    'test_diag_v2',
    'test_iou_similarity_op',
    'test_inplace_auto_generated_apis',
    'test_dataset',
    'test_bilinear_api',
    'test_empty_like_op',
    'test_imperative_layer_children',
    'nccl_op_test',
    'test_tree_conv_op',
    'test_share_data_op',
    'test_ir_memory_optimize_transformer',
    'test_math_op_patch',
    'test_base_layer',
    'test_dequantize_log_op',
    'test_complex_matmul',
    'test_prelu_op',
    'test_l1_norm_op',
    'test_rmsprop_op',
    'test_fuse_bn_act_pass',
    'test_inplace_addto_strategy',
    'test_ptb_lm_v2',
    'test_paddle_save_load',
    'test_prelu_mkldnn_op',
    'test_box_coder_op',
    'test_atan2_op',
    'test_unsqueeze_op',
    'test_profiler',
    'test_affine_channel_op',
    'test_leaky_relu_grad_grad_functor',
    'test_ctc_align',
    'test_fuse_relu_depthwise_conv_pass',
    'test_complex_kron',
    'test_imperative_skip_op',
    'test_dgc_op',
    'test_regularizer_api',
    'test_nll_loss',
    'test_imperative_layers',
    'test_rnn_decode_api',
    'test_imperative_partitial_backward',
    'test_where_op',
    'test_std_layer',
    'test_ir_embedding_eltwise_layernorm_fuse_pass',
    'test_multihead_attention',
    'test_grid_sampler_op',
    'test_initializer_nn',
    'test_var_base',
    'test_fuse_elewise_add_act_pass',
    'test_select_input_output_op',
    'test_lstm_op',
    'test_break_continue',
    'test_imperative_parallel_coalesce_split',
    'test_expand_as_op',
    'test_user_defined_quantization',
    'test_tensor_to_list',
    'test_limit_gpu_memory',
    'test_adamax_api',
    'test_softmax_mask_fuse_upper_triangle_op',
    'test_fake_quantize_op',
    'vol2col_test',
    'test_cast_op',
    'test_proximal_gd_op',
    'test_mul_nn_grad',
    'test_full_like_op',
    'trt_instance_norm_test',
    'test_elementwise_mod_op',
    'test_grad_clip_minimize',
    'test_one_hot_v2_op',
    'test_complex_sum_layer',
    'test_isfinite_v2_op',
    'test_is_empty_op',
    'test_simnet_v2',
    'beam_search_test',
    'test_randperm_op',
    'test_elementwise_add_op_inplace',
    'test_imperative_selected_rows',
    'test_py_reader_using_executor',
    'test_activation_op',
    'test_nn_functional_embedding_dygraph',
    'test_reshape_op',
    'test_maxout_op',
    'test_sigmoid_focal_loss',
    'test_manual_seed',
    'test_lrn_op',
    'test_dataset_dataloader',
    'test_complex_variable',
    'test_lite_engine',
    'test_neg_op',
    'test_view_op_reuse_allocation',
    'test_split_op',
    'test_ptb_lm',
    'test_elementwise_sub_op',
    'test_compare_op',
    'test_simnet',
    'test_normal',
    'test_tensor_scalar_type_promotion_static',
    'test_trt_group_norm_op',
    'test_learning_rate_scheduler',
    'test_numel_op',
    'test_adaptive_max_pool3d',
    'test_sequential',
    'test_imperative_optimizer',
    'test_subtract_op',
    'test_conv_transpose_nn_grad',
    'test_sigmoid_focal_loss_op',
    'test_cuda_stream_event',
    'test_sequence_pad_op',
    'test_rnn_cells',
    'test_partial_sum_op',
    'test_rnn_nets_static',
    'test_max_op',
    'test_logical_op',
    'test_squared_l2_norm_op',
    'test_center_loss',
    'test_quantization_pass',
    'test_imperative_gnn',
    'test_conv_elementwise_add_act_fuse_pass',
    'test_roll_op',
    'test_imperative_container_layerdict',
    'test_shape_op',
    'test_bmm_op',
    'test_matmul_v2_op',
    'test_hinge_loss_op',
    'test_imperative_qat',
    'test_add_position_encoding_op',
    'test_rnn_op',
    'test_gradient_clip',
    'test_py_reader_pin_memory',
    'test_concat_op',
    'test_weight_decay_extend',
    'test_accuracy_op',
    'test_cond',
    'test_resnet_v2',
    'test_adagrad_op',
    'test_mv_op',
    'test_print_op',
    'test_grad',
    'test_square_error_cost',
    'test_rnn_cells_static',
    'test_mkldnn_batch_norm_act_fuse_pass',
    'test_input_spec',
    'test_adam_op',
    'test_elementwise_floordiv_op',
    'test_diagonal_op',
    'test_nearest_interp_op',
    'test_diag_embed',
    'test_merge_selectedrows_op',
    'test_feed_data_check_shape_type',
    'test_complex_trace_layer',
    'test_slice_op',
    'test_bmn',
    'test_nn_quant_functional_layers',
    'test_broadcast_tensors_op',
    'test_selu_op',
    'test_group_norm_op_v2',
    'test_tensor_to_numpy',
    'test_queue',
    'test_rank_loss_op',
    'test_trace_op',
    'test_case',
    'test_prroi_pool_op',
    'test_op_name_conflict',
    'test_psroi_pool_op',
    'test_set_value_op',
    'test_ones_like',
    'test_assign_value_op',
    'test_ema',
    'test_lamb_op',
    'test_dgc_momentum_op',
    'test_custom_grad_input',
    'test_trunc_op',
    'test_bernoulli_op',
    'test_custom_relu_model',
    'test_backward',
    'test_conv3d_transpose_part2_op',
    'test_complex_transpose',
    'test_memory_reuse_exclude_feed_var',
    'test_polygon_box_transform',
    'math_function_gpu_test',
    'test_program_prune_backward',
    'test_ema_fleet',
    'test_fleet_amp_init',
    'test_normalize',
    'test_correlation',
    'test_conv_elementwise_add2_act_fuse_pass',
    'test_imperative_container_layerlist',
    'test_dequantize_abs_max_op',
    'test_fuse_optimizer_pass',
    'test_optimizer',
    'test_dynamic_rnn_stop_gradient',
    'test_raw_program_optimizer',
    'test_pow',
    'test_inplace_softmax_with_cross_entropy',
    'test_transforms',
    'test_unfold_op',
    'test_assign_op',
    'test_isinstance',
    'auto_growth_best_fit_allocator_facade_test',
    'test_cholesky_op',
    'test_adaptive_avg_pool3d',
    'test_paddle_save_load_binary',
    'test_fused_fc_elementwise_layernorm_op',
    'test_sequence_enumerate_op',
    'test_lgamma_op',
    'test_modified_huber_loss_op',
    'trt_quant_int8_test',
    'test_callback_visualdl',
    'test_linspace',
    'test_update_loss_scaling_op',
    'test_arg_min_max_op',
    'test_bce_loss',
    'test_nn_margin_rank_loss',
    'test_arg_min_max_v2_op',
    'test_variance_layer',
    'test_quantization_scale_pass',
    'test_segment_ops',
    'test_layers',
    'test_isfinite_op',
    'test_imperative_qat_channelwise',
    'test_eye_op',
    'test_imperative_framework',
    'test_l1_loss',
    'test_ifelse',
    'test_cache_program',
    'test_ir_fc_fuse_pass',
    'test_kldiv_loss_op',
    'test_switch_case',
    'test_unique',
    'test_prod_op',
    'test_edit_distance_op',
    'test_sequence_expand_as',
    'test_full_name_usage',
    'test_glu',
    'test_pad2d_op',
    'test_read_file',
    'test_erf_op',
    'test_sequence_unpad_op',
    'test_sequence_conv',
    'allocator_facade_abs_flags_test',
    'test_detach',
    'test_cross_entropy_op',
    'test_wrappers',
    'test_fleet_base_single',
    'test_conv_elementwise_add_fuse_pass',
    'test_auto_growth_gpu_memory_limit',
    'test_sequence_reverse',
    'test_fc_op',
    'test_diagflat',
    'test_adamax_op',
    'test_op_attr',
    'paddle_infer_api_test',
    'test_mixed_precision',
    'lite_mul_model_test',
    'test_sort_op',
    'test_imperative_out_scale',
    'test_vision_models',
    'test_rnn_encoder_decoder',
    'test_fleet_with_asp_amp',
    'test_partial_eager_deletion_transformer',
    'test_imperative_star_gan_with_gradient_penalty',
    'test_stack_op',
    'test_shuffle_batch_op',
    'test_clip_op',
    'test_py_func_op',
    'test_pool_max_op',
    'test_log_softmax',
    'test_imperative_container_parameterlist',
    'test_multiplex_op',
    'test_trt_transpose_flatten_concat_fuse_pass',
    'test_seqconv_eltadd_relu_fuse_pass',
    'test_assert_op',
    'test_scatter_nd_op',
    'test_sequence_expand',
    'test_arange',
    'test_translated_layer',
    'test_decoupled_py_reader_data_check',
    'test_analyzer_ernie_large',
    'test_tensor_array_to_tensor',
    'test_functional_conv2d_transpose',
    'test_error',
    'test_callbacks',
    'test_imperative_recurrent_usage',
    'test_deform_conv2d',
    'test_coalesce_tensor_op',
    'test_tsm',
    'test_fused_multihead_matmul_op',
    'test_softmax_mask_fuse_op',
    'test_optimizer_grad',
    'test_complex_abs',
    'test_gradient_accmulator',
    'test_instance_norm_op_v2',
    'test_random_crop_op',
    'test_mobile_net',
    'test_parallel_executor_transformer',
    'test_tensor_scalar_type_promotion_dynamic',
    'test_eager_deletion_delete_vars',
    'test_asp_pruning_1d',
    'test_imperative_using_non_zero_gpu',
    'test_machine_translation',
    'test_flatten_op',
    'test_onnx_export',
    'test_optimizer_for_varbase',
    'test_fusion_transpose_flatten_concat_op',
    'best_fit_allocator_test',
    'test_ir_fusion_group_pass',
    'test_trt_quant_conv2d_dequant_fuse_pass',
    'test_allclose_op',
    'test_ftrl_op',
    'test_elementwise_add_op',
    'test_instance_norm_op',
    'test_lambv2_op',
    'test_yolo_box_op',
    'test_parallel_executor_drop_scope',
    'test_generator_dataloader',
    'test_conv2d_transpose_op_depthwise_conv',
    'test_imperative_save_load_v2',
    'test_lookahead',
    'test_moving_average_abs_max_scale_op',
    'test_roi_perspective_transform_op',
    'test_tensorrt_engine',
    'test_affine_grid_function',
    'test_nonzero_api',
    'test_ir_memory_optimize_pass',
    'test_reduce_mkldnn_op',
    'test_bilinear_interp_op',
    'test_cvm_op',
    'test_scale_op',
    'test_matmul_op',
    'test_sequence_pool',
    'test_complex_simplenet',
    'test_complex_reshape',
    'test_flatten_contiguous_range_op',
    'test_python_operator_overriding',
    'lite_resnet50_test',
    'test_sequence_erase_op',
    'test_deformable_psroi_pooling',
    'test_multi_precision_fp16_train',
    'test_adam_op_multi_thread',
    'test_decoupled_py_reader',
    'test_distribute_fpn_proposals_op',
    'transform_test',
    'test_nan_inf',
    'test_fuse_bn_add_act_pass',
    'test_unpool_op',
    'test_parallel_executor_dry_run',
    'test_layer_norm_op_v2',
    'test_embedding_id_stop_gradient',
    'test_mkldnn_fc_act_fuse_pass',
    'sequence_pooling_test',
    'test_get_tensor_from_selected_rows_op',
    'test_imperative_ptb_rnn_sorted_gradient',
    'test_hapi_hub',
    'test_reverse_op',
    'test_compiled_program',
    'test_lambda',
    'test_adadelta_op',
    'test_nn_sigmoid_op',
    'test_nearest_interp_v2_op',
    'test_sequence_slice_op',
    'test_program_translator',
    'malloc_test',
    'test_size_op',
    'test_analysis_predictor',
    'test_recognize_digits',
    'test_parameter',
    'test_transpose_flatten_concat_fuse_pass',
    'test_imperative_trace_non_persistable_inputs',
    'test_pass_builder',
    'thread_local_allocator_test',
    'test_variable',
    'test_fsp_op',
    'test_elementwise_gradient_op',
    'test_multinomial_op',
    'test_trt_shuffle_channel_detect_pass',
    'test_generate_proposals_v2_op',
    'test_graph',
    'test_gelu_op',
    'test_sample_logits_op',
    'test_weight_normalization',
    'test_activation_bf16_mkldnn_op',
    'trt_dynamic_shape_test',
    'test_traced_layer_err_msg',
    'test_conv1d_layer',
    'test_asp_optimize',
    'test_imperative_container_sequential',
    'test_bert',
    'test_transformer_api',
    'test_linear_interp_v2_op',
    'test_pixel_shuffle',
    'test_expand_op',
    'test_save_load',
    'test_dropout_op',
    'test_while_loop_op',
    'float16_gpu_test',
    'test_dict',
    'test_bilinear_tensor_product_op',
    'test_parallel_executor_pg',
    'test_assert',
    'test_smooth_l1_loss_op',
    'sequence_padding_test',
    'test_analyzer_ernie',
    'test_minimum_op',
    'test_yolov3_loss_op',
    'test_decayed_adagrad_op',
    'test_split_mkldnn_op',
    'test_squeeze_op',
    'test_save_inference_model',
    'test_smooth_l1_loss',
    'test_bilateral_slice_op',
    'test_inplace_abn_op',
    'test_fetch_unmerged',
    'test_parallel_executor_feed_persistable_var',
    'test_parallel_executor_fetch_isolated_var',
    'test_parallel_executor_inference_feed_partial_data',
    'test_parallel_executor_seresnext_base_gpu',
    'test_parallel_executor_test_while_train',
    'test_parallel_executor_seresnext_with_fuse_all_reduce_gpu',
    'test_parallel_ssa_graph_inference_feed_partial_data',
    'test_parallel_executor_seresnext_with_reduce_gpu',
    'test_data_norm_op',
    'test_install_check',
    'graph_node_test',
    'trt_mobilenet_test',
    'trt_cascade_rcnn_test',
    'trt_resnext_test',
    'test_activation_nn_grad',
    'test_trt_dynamic_shape_ernie_fp16_ser_deser',
    'test_cross_entropy2_op',
    'test_layer_norm_op',
    'test_pool3d_op',
    'test_static_save_load',
    'test_trt_flatten_op',
    'test_trt_yolo_box_op',
    'test_trt_reshape_op',
    'test_trt_elementwise_op',
    'test_trt_affine_channel_op',
    'test_trt_matmul',
    'test_trt_fc_fuse_pass',
    'test_trt_pad_op',
    'trt_resnet50_test',
    'test_imperative_lod_tensor_to_selected_rows',
    'test_gru_unit_op',
    'test_amp_check_finite_and_scale_op',
    'test_imperative_selected_rows_to_lod_tensor',
    'test_add_reader_dependency',
    'test_imperative_transformer_sorted_gradient',
    'test_bicubic_interp_v2_op',
    'test_rank_attention_op',
    'test_space_to_depth_op',
    'test_image_classification',
    'test_custom_relu_op_setup',
    'test_sgd_op',
]
# Tests with mem != 0 (they do occupy GPU memory): scheduled 7 jobs at a time
# in "Single" cases and 3 at a time in "exclusive" cases.
FIFTH_PARALLEL_JOB_NEW = [
    'test_buffer_shared_memory_reuse_pass',
    'test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass',
    'test_multiprocess_reader_exception',
    'buddy_allocator_test',
    'test_multiprocess_dataloader_dataset',
    'test_multiprocess_dataloader_dynamic',
    'test_multiprocess_dataloader_static',
    'test_imperative_resnet',
    'test_nn_grad',
    'test_conv2d_op_depthwise_conv',
    'test_yolov3',
    'test_conv_nn_grad',
    'test_imperative_data_loader_fds_clear',
    'test_conv2d_op',
    'test_imperative_data_loader_base',
    'test_imperative_resnet_sorted_gradient',
    'test_multiprocess_dataloader_iterable_dataset_dynamic',
    'test_imperative_se_resnext',
    'test_norm_nn_grad',
    'test_conv2d_api',
]
# Next parallelism tier after FIFTH_PARALLEL_JOB_NEW; this list had no
# scheduling comment in the original.
# NOTE(review): exact job counts per case are not stated here — confirm against
# the CI scheduler before relying on them.
SIXTH_PARALLEL_JOB_NEW = [
    'paddle_infer_api_copy_tensor_tester',
    'test_fill_any_op',
    'test_frame_op',
    'test_linalg_pinv_op',
    'test_linalg_lstsq_op',
    'test_gumbel_softmax_op',
    'test_matrix_power_op',
    'test_multi_dot_op',
    'test_searchsorted_op',
    'test_overlap_add_op',
    'test_sparse_momentum_op',
    'test_solve_op',
    'test_tensor_fill_diagonal_',
    'test_tensor_fill_diagonal_tensor_',
    'test_vjp_jvp',
    'test_fft_with_static_graph',
    'test_svd_op',
    'test_hessian',
    'test_jacobian',
    'test_spectral_op',
    'test_trt_conv3d_op',
    'test_trt_conv3d_transpose_op',
    'test_trt_tuned_dynamic_shape',
    'test_trt_convert_activation',
    'test_trt_convert_affine_channel',
    'test_trt_convert_anchor_generator',
    'test_trt_fc_fuse_quant_dequant_pass',
    'test_trt_convert_batch_norm',
    'test_trt_conv_quant_dequant_pass',
    'test_trt_convert_elementwise',
    'test_trt_convert_depthwise_conv2d_transpose',
    'test_trt_convert_flatten',
    'test_trt_matmul_quant_dequant',
    'test_trt_convert_dropout',
    'test_trt_convert_conv2d_transpose',
    'test_trt_convert_group_norm',
    'test_trt_convert_layer_norm',
    'test_trt_convert_hard_swish',
    'test_trt_convert_mish',
    'test_trt_convert_gather',
    'test_trt_convert_gelu',
    'test_trt_convert_reshape',
    'test_trt_convert_conv2d_fusion',
    'test_trt_convert_conv2d',
    'test_trt_convert_instance_norm',
    'test_trt_convert_skip_layernorm',
    'test_trt_convert_scale',
    'test_trt_convert_leaky_relu',
    'test_trt_convert_softmax',
    'test_trt_convert_pad',
    'test_trt_convert_tile',
    'test_trt_convert_depthwise_conv2d',
    'test_trt_convert_stack',
    'test_trt_convert_prelu',
    'test_trt_convert_shuffle_channel',
    'test_trt_convert_yolo_box',
    'test_trt_convert_roi_align',
    'test_trt_convert_split',
    'test_trt_convert_transpose',
    'test_standalone_executor',
    'test_trt_convert_pool2d',
    'test_trt_convert_emb_eltwise_layernorm',
    'trt_quant_int8_yolov3_r50_test',
    'test_trt_dynamic_shape_ernie',
    'test_trt_reduce_mean_op',
    'test_trt_nearest_interp_op',
    'test_trt_instance_norm_op',
    'test_trt_conv_pass',
    'test_trt_scale_op',
    'test_trt_slice_plugin',
    'test_trt_gather_op',
    'test_seq2seq',
    'test_bilinear_interp_v2_op',
    'test_conv2d_transpose_op',
    'test_conv3d_op',
    'test_cross_entropy_loss',
    'test_trilinear_interp_op',
    'test_pretrained_model',
    'test_post_training_quantization_mnist',
    'test_collective_wait',
    'test_nn_matmul_v2_grad',
    'test_quant2_int8_resnet50_mkldnn',
    'test_reducescatter_api',
    'test_collective_sendrecv',
    'test_collective_scatter',
    'test_gru_op',
    'test_softmax_with_cross_entropy_op',
    'test_elementwise_nn_grad',
]
# Lowest-parallelism tier: the heaviest / least parallel-safe tests.
# NOTE(review): no scheduling comment in the original — confirm job counts
# against the CI scheduler.
LOWEST_PARALLEL_JOB_NEW = [
    'heter_cloud_comm_cpu_test',
    'heter_server_test',
    'test_scatter_op',
    'test_trt_convert_hard_sigmoid',
    'test_gather_op',
    'test_trilinear_interp_v2_op',
]
# *======= These unit tests don't occupy GPU memory and are run as CPU unit tests =======* #
# They run 16 jobs at a time. If one fails due to insufficient GPU memory or
# CUBLAS_STATUS_ALLOC_FAILED, just remove it from this list.
CPU_PARALLEL_JOB = [
'test_static_save_load_large',
'version_test',
'var_type_traits_test',
'variable_test',
'unroll_array_ops_test',
'tuple_test',
'to_string_test',
'threadpool_test',
'test_zeros_op',
'test_while_op',
'test_weight_quantization_mobilenetv1',
'test_version',
'test_var_info',
'test_var_conv_2d',
'test_utils',
'test_unique_name',
'test_transpose_int8_mkldnn_op',
'test_transpose_bf16_mkldnn_op',
'test_trainer_desc',
'test_trainable',
'test_teacher_student_sigmoid_loss_op',
'test_tdm_sampler_op',
'test_tdm_child_op',
'test_sysconfig',
'test_sync_batch_norm_pass',
'test_switch',
'test_static_shape_inferrence_for_shape_tensor',
'test_static_analysis',
'test_squared_mat_sub_fuse_pass',
'test_spawn_and_init_parallel_env',
'test_slice_var',
'test_skip_layernorm_fuse_pass',
'test_simplify_with_basic_ops_pass',
'test_similarity_focus_op',
'test_shuffle_batch_op',
'test_set_bool_attr',
'test_sequence_topk_avg_pooling',
'test_sequence_scatter_op',
'test_sequence_last_step',
'test_sequence_first_step',
'test_seqpool_cvm_concat_fuse_pass',
'test_seqpool_concat_fuse_pass',
'test_seq_concat_fc_fuse_pass',
'test_selected_rows',
'test_scope',
'test_scale_matmul_fuse_pass',
'test_scaled_dot_product_attention',
'test_sampling_id_op',
'test_runtime_and_compiletime_exception',
'test_run_fluid_by_module_or_command_line',
'test_rpn_target_assign_op',
'test_row_conv',
'test_rnn_memory_helper_op',
'test_reshape_transpose_matmul_mkldnn_fuse_pass',
'test_reshape_bf16_op',
'test_require_version',
'test_requantize_mkldnn_op',
'test_repeated_fc_relu_fuse_pass',
'test_registry',
'test_reducescatter_api',
'test_reducescatter',
'test_recurrent_op',
'test_recommender_system',
'test_query_op',
'test_quantize_transpiler',
'test_quantize_mkldnn_op',
'test_quantization_mkldnn_pass',
'test_quant_int8_resnet50_mkldnn',
'test_quant_int8_mobilenetv2_mkldnn',
'test_quant_int8_mobilenetv1_mkldnn',
'test_quant_int8_googlenet_mkldnn',
'test_quant2_int8_resnet50_range_mkldnn',
'test_quant2_int8_resnet50_mkldnn',
'test_quant2_int8_resnet50_channelwise_mkldnn',
'test_quant2_int8_mobilenetv1_mkldnn',
'test_quant2_int8_mkldnn_pass',
'test_quant2_int8_ernie_mkldnn',
'test_py_reader_sample_generator',
'test_py_reader_return_list',
'test_py_reader_lod_level_share',
'test_py_reader_error_msg',
'test_pyramid_hash_op',
'test_pybind_interface',
'test_ps_dispatcher',
'test_prune',
'test_protobuf_descs',
'test_protobuf',
'test_progressbar',
'test_program_to_string',
'test_program_code',
'test_program',
'test_precision_recall_op',
'test_post_training_quantization_resnet50',
'test_post_training_quantization_mobilenetv1',
'test_post_training_quantization_mnist',
'test_positive_negative_pair_op',
'test_paddle_inference_api',
'test_origin_info',
'test_op_version',
'test_op_support_gpu',
'test_operator_desc',
'test_operator',
'test_ones_op',
'test_npair_loss_op',
'test_nn_functional_embedding_static',
'test_name_scope',
'test_naive_executor',
'test_multiprocess_dataloader_iterable_dataset_split',
'test_multiprocess_dataloader_exception',
'test_multihead_matmul_fuse_pass',
'test_multi_gru_seq_fuse_pass',
'test_multi_gru_mkldnn_op',
'test_multi_gru_fuse_pass',
'test_multiclass_nms_op',
'test_mul_int8_mkldnn_op',
'test_mkldnn_scale_matmul_fuse_pass',
'test_mkldnn_placement_pass',
'test_mkldnn_op_nhwc',
'test_mkldnn_op_inplace',
'test_mkldnn_matmul_transpose_reshape_fuse_pass',
'test_mkldnn_matmul_op_output_fuse_pass',
'test_mkldnn_cpu_bfloat16_pass',
'test_mkldnn_conv_concat_relu_mkldnn_fuse_pass',
'test_mkldnn_conv_bias_fuse_pass',
'test_mkldnn_conv_activation_fuse_pass',
'test_mine_hard_examples_op',
'test_memory_usage',
'test_matrix_nms_op',
'test_matmul_transpose_reshape_fuse_pass',
'test_matmul_mkldnn_op',
'test_matmul_bf16_mkldnn_op',
'test_match_matrix_tensor_op',
'test_lookup_table_dequant_op',
'test_logging_utils',
'test_logger',
'test_lod_tensor_array_ops',
'test_lod_tensor_array',
'test_locality_aware_nms_op',
'test_load_vars_shape_check',
'test_load_op_xpu',
'test_load_op',
'test_limit_gpu_memory',
'test_layer_norm_mkldnn_op',
'test_layer_norm_bf16_mkldnn_op',
'test_layer',
'test_is_test_pass',
'test_ir_skip_layernorm_pass',
'test_ir_graph',
'test_io_save_load',
'test_input_spec',
'test_infer_shape',
'test_infer_no_need_buffer_slots',
'test_inference_model_io',
'test_inference_api',
'test_imperative_signal_handler',
'test_imperative_numpy_bridge',
'test_imperative_group',
'test_imperative_decorator',
'test_imperative_data_loader_process',
'test_imperative_data_loader_exit_func',
'test_imperative_base',
'test_image_classification_layer',
'test_image',
'test_ifelse_basic',
'test_hsigmoid_op',
'test_hooks',
'test_hash_op',
'test_group',
'test_graph_pattern_detector',
'test_gpu_package_without_gpu_device',
'test_global_var_getter_setter',
'test_get_set_flags',
'test_generator',
'test_generate_proposal_labels_op',
'test_generate_mask_labels_op',
'test_gast_with_compatibility',
'test_fusion_squared_mat_sub_op',
'test_fusion_seqpool_cvm_concat_op',
'test_fusion_seqpool_concat_op',
'test_fusion_seqexpand_concat_fc_op',
'test_fusion_seqconv_eltadd_relu_op',
'test_fusion_repeated_fc_relu_op',
'test_fusion_lstm_op',
'test_fusion_gru_op',
'test_fusion_gru_mkldnn_op',
'test_fusion_gru_int8_mkldnn_op',
'test_fusion_gru_bf16_mkldnn_op',
'test_fused_emb_seq_pool_op',
'test_fused_embedding_fc_lstm_op',
'test_function_spec',
'test_full_op',
'test_fs_interface',
'test_fs',
'test_framework_debug_str',
'test_fp16_utils',
'test_fleet_util',
'test_fleet_unitaccessor',
'test_fleet_runtime',
'test_fleet_rolemaker_init',
'test_bf16_utils',
'test_fleet_rolemaker_4',
'test_fleet_rolemaker_3',
'test_fleet_rolemaker',
'test_fleet_nocvm_1',
'test_fleet_base_4',
'test_fleet',
'test_flags_use_mkldnn',
'test_flags_mkldnn_ops_on_off',
'test_filter_by_instag_op',
'test_fetch_var',
'test_fetch_handler',
'test_feed_fetch_method',
'test_fc_mkldnn_op',
'test_fc_elementwise_layernorm_fuse_pass_cc',
'test_fc_bf16_mkldnn_op',
'test_executor_feed_non_tensor',
'test_executor_check_feed',
'test_executor_and_use_program_cache',
'test_exception',
'test_error_clip',
'test_entry_attr2',
'test_entry_attr',
'test_embedding_eltwise_layernorm_fuse_pass',
'test_elementwise_mul_bf16_mkldnn_op',
'test_elementwise_add_bf16_mkldnn_op',
'test_eager_deletion_recurrent_op',
'test_eager_deletion_padding_rnn',
'test_eager_deletion_mnist',
'test_eager_deletion_dynamic_rnn_base',
'test_eager_deletion_conditional_block',
'test_dynrnn_static_input',
'test_dynrnn_gradient_check',
'test_dyn_rnn',
'test_dygraph_mode_of_unittest',
'test_dpsgd_op',
'test_downpoursgd',
'test_download',
'test_distributions',
'test_distributed_reader',
'test_directory_migration',
'test_detection_map_op',
'test_desc_clone',
'test_dequantize_mkldnn_op',
'test_depthwise_conv_mkldnn_pass',
'test_deprecated_memory_optimize_interfaces',
'test_default_scope_funcs',
'test_default_dtype',
'test_debugger',
'test_dataset_voc',
'test_dataset_uci_housing',
'test_dataset_imikolov',
'test_dataset_imdb',
'test_dataset_conll05',
'test_dataloader_dataset',
'test_data_generator',
'test_data_feeder',
'test_data',
'test_cudnn_placement_pass',
'test_crypto',
'test_crf_decoding_op',
'test_create_parameter',
'test_create_op_doc_string',
'test_create_global_var',
'test_cpu_quantize_placement_pass',
'test_cpu_bfloat16_placement_pass',
'test_cpu_bfloat16_pass',
'test_conv_concat_relu_mkldnn_fuse_pass',
'test_conv_bias_mkldnn_fuse_pass_cc',
'test_conv_batch_norm_mkldnn_fuse_pass',
'test_conv3d_transpose_layer',
'test_conv3d_mkldnn_op',
'test_conv3d_layer',
'test_conv2d_transpose_layer',
'test_conv2d_mkldnn_op',
'test_conv2d_layer',
'test_conv2d_int8_mkldnn_op',
'test_conv2d_bf16_mkldnn_op',
'test_context_manager',
'test_const_value',
'test_conditional_block',
'test_concat_int8_mkldnn_op',
'test_concat_bf16_mkldnn_op',
'test_compat',
'test_common_infer_shape_functions',
'test_chunk_eval_op',
'test_check_import_scipy',
'test_c_comm_init_all_op',
'test_calc_gradient',
'test_broadcast_to_op',
'test_broadcast_shape',
'test_broadcast_error',
'test_bpr_loss_op',
'test_boxps',
'test_bipartite_match_op',
'test_benchmark',
'test_beam_search_op',
'test_batch_sampler',
'test_batch_norm_act_fuse_pass',
'test_attention_lstm_op',
'test_analyzer',
'test_aligned_allocator',
'stringprintf_test',
'split_test',
'selected_rows_functor_test',
'scope_test',
'scatter_test',
'save_quant2_model_resnet50',
'save_quant2_model_gru',
'save_quant2_model_ernie',
'save_load_op_test',
'save_load_combine_op_test',
'rw_lock_test',
'reader_test',
'reader_blocking_queue_test',
'prune_test',
'program_desc_test',
'profiler_test',
'place_test',
'op_version_registry_test',
'op_tester',
'op_proto_maker_test',
'op_kernel_type_test',
'operator_test',
'operator_exception_test',
'op_debug_string_test',
'op_compatible_info_test',
'op_call_stack_test',
'no_need_buffer_vars_inference_test',
'node_test',
'nccl_context_test',
'mmap_allocator_test',
'math_function_test',
'mask_util_test',
'lod_tensor_test',
'test_check_abi',
'lodtensor_printer_test',
'test_dispatch_jit',
'inlined_vector_test',
'infer_io_utils_tester',
'graph_to_program_pass_test',
'graph_test',
'graph_helper_test',
'gather_test',
'gather_op_test',
'fused_broadcast_op_test',
'float16_test',
'exception_holder_test',
'errors_test',
'eigen_test',
'dropout_op_test',
'dlpack_tensor_test',
'dist_multi_trainer_test',
'dim_test',
'device_worker_test',
'decorator_test',
'ddim_test',
'data_type_test',
'test_check_error',
'data_layout_transform_test',
'cudnn_helper_test',
'cudnn_desc_test',
'cpu_vec_test',
'cpu_info_test',
'cow_ptr_tests',
'conditional_block_op_test',
'cipher_utils_test',
'check_reduce_rank_test',
'broadcast_op_test',
'bfloat16_test',
'complex_test',
'beam_search_decode_op_test',
'auto_growth_best_fit_allocator_test',
'assign_op_test',
'aes_cipher_test',
'test_dist_sparse_tensor_load_adagrad',
'test_dist_mnist_fp16_allreduce',
'test_dist_mnist_gradient_merge',
'test_dist_allreduce_op',
'test_parallel_dygraph_se_resnext',
'test_dist_fleet_ps9',
'test_dist_fleet_infer',
'test_dist_se_resnext_sync',
'test_dist_oneps',
'test_dist_sparse_load_ps1',
'test_dist_mnist_batch_merge',
'test_dist_fleet_ctr',
'test_dist_fleet_ps10',
'test_parallel_dygraph_transformer',
'test_dist_mnist_fleetapi',
'test_dist_sparse_tensor_load_adam',
'test_dist_fleet_ps4',
'test_dist_fleet_heter_program',
'test_parallel_dygraph_sparse_embedding_over_height',
'test_dist_sharding_save',
'test_dist_fleet_ps_gpu_ctr',
'test_dist_mnist_backward_deps',
'test_dist_fleet_heter_base',
'test_dist_sparse_tensor_load_sgd',
'test_new_group',
'test_dist_mnist_with_program',
'test_dist_mnist_pg',
'test_dist_sparse_tensor_load_rmsprop',
'test_dist_sparse_tensor_load_ftrl',
'test_dist_fleet_ps6',
'test_dist_mnist_fleet_save',
'test_dist_fleet_a_sync_optimizer_sync',
'test_dist_fleet_ps3',
'test_dist_se_resnext_nccl',
'test_parallel_dygraph_mnist',
'test_dist_fleet_a_sync_optimizer_auto_async',
'test_pipeline',
'test_dist_fleet_ps8',
'test_dist_fleet_sparse_embedding_ctr',
'test_dist_se_resnext_dgc',
'test_dist_fleet_ps7',
'test_dist_fleet_decay',
'test_dist_fleet_a_sync_optimizer_auto_geo',
'test_dist_fleet_geo',
'test_parallel_dygraph_dataparallel',
'test_dist_mnist_dgc_nccl',
'test_dist_fleet_ctr2',
'test_parallel_dygraph_unused_variables',
'test_dist_mnist_multi_comm',
'test_dist_sparse_tensor_load_momentum',
'test_gen_nccl_id_op',
'test_parallel_dygraph_sparse_embedding',
'test_dist_mnist_ring_allreduce',
'test_fleet_launch_async',
'test_dist_fleet_a_sync_optimizer_geo',
'test_auto_checkpoint',
'test_fleet_pipeline_meta_optimizer',
'test_dist_fleet_heter_ctr',
'test_fleet_graph_execution_meta_optimizer',
'test_fleet_run_random_port',
'test_dist_fleet_ps5',
'test_dist_fleet_a_sync_optimizer_auto',
'test_dist_lookup_sparse_table_fuse_ops',
'test_dist_fleet_a_sync_optimizer_async',
'test_c_comm_init_op',
'test_fleet_launch_nproc',
'test_dist_fleet_simnet',
'test_fleet_launch_cloud',
'test_dist_fleet_ps',
'test_dist_op',
'test_dist_sparse_load_ps0',
'test_dist_fleet_ps2',
'test_dist_fleet_grad_clip',
'test_custom_concat',
'test_analyzer_seq_pool1_fuse_statis',
'test_fleet_ps',
'test_analyzer_multi_model_prediction',
'test_fleet_base_3',
'test_fleet_base_2',
'test_ascend_trigger',
'test_fleet_amp_meta_optimizer',
'test_fleetrun',
'dense_table_test',
'test_fleet_recompute_meta_optimizer',
'test_fleet_fp16_allreduce_meta_optimizer',
'test_post_training_quantization_lstm_model',
'test_fleet_metric',
'test_fleet_gradient_merge_meta_optimizer',
'test_fleet_sharding_meta_optimizer',
'test_listen_and_serv_op',
'test_analyzer_zerocopytensor_tensor',
'test_collective_optimizer',
'test_analyzer_seq_pool1_compare_determine',
'test_avoid_twice_initialization',
'test_fleet_distributed_strategy',
'test_launch_coverage',
'test_sgd_op_bf16',
'test_model_cast_to_bf16',
'test_hybrid_parallel_topology',
'barrier_table_test',
'test_fleet_lamb_meta_optimizer',
'test_fleet_rolemaker_2',
'test_distributed_strategy',
'test_rnn_cudnn_params_packing',
'test_communicator_async',
'brpc_utils_test',
'test_analyzer_capi_pd_tensor',
'test_recv_save_op',
'heter_listen_and_server_test',
'test_analyzer_capi_ner',
'test_unsqueeze2_eltwise_fuse_pass_cc',
'test_dgc_optimizer',
'heter_server_test',
'test_custom_conj',
'test_fleet_private_function',
'test_fake_init_op',
'brpc_service_sparse_sgd_test',
'test_tf32_cudnn',
'test_communicator_geo',
'test_fleet_dgc_meta_optimizer',
'test_fc_fuse_pass_cc',
'test_communicator_sync',
'test_analyzer_capi',
'test_fleet_lars_meta_optimizer',
'test_communicator_half_async',
'test_fleet_localsgd_meta_optimizer',
'test_fleet_amp_init',
'test_analyzer_seq_pool1_fuse_compare_zero_copy',
'test_lookup_table_bf16_op',
'test_fleet_meta_optimizer_base',
'table_test',
'test_fleet_rolemaker_new',
'test_fleet_graph_executor',
'test_multi_out_jit',
'test_fleet_utils',
'brpc_service_dense_sgd_test',
'test_custom_linear',
'phi_test_backend',
'test_allocator',
'phi_test_data_type',
'test_slice_api',
'test_scale_api',
'test_sum_api',
'test_op_compat_sensible_pass',
'test_generate_pass_cc',
'program_utils_test',
'build_strategy_test',
'test_fc_rnn_mkldnn_fuse_pass',
'scope_guard_test',
'phi_utils_test',
'init_test',
'cpu_helper_test',
'complex_gpu_test',
'bfloat16_gpu_test',
'test_dot_dev_api',
'test_copy_dev_api',
'test_convert_utils',
'test_type_info',
'test_flatten_dev_api',
'test_storage',
'test_intrusive_ptr',
'test_dense_tensor',
'small_vector_test',
'test_framework_place_utils',
'test_reshape_api',
'test_cast_api',
'test_phi_exception',
'test_mean_api',
'test_framework_storage',
'test_fill_api',
'test_elementwise_api',
'test_dot_api',
'test_auto_parallel_api',
'test_tensor_copy_from',
'test_analyzer_capi_exp_xpu',
'test_table_printer',
'test_egr_task_autocodegen',
'test_static_save_load_bf16',
'test_parallel_executor_run_cinn',
'test_egr_task_tensor_utils',
'test_egr_task_hook',
'test_egr_task_forward_autograd',
'test_egr_task_eager_utils',
'test_egr_task_cross_batch',
'test_egr_task_backward',
'test_egr_ds_tensor_wrapper',
'test_egr_ds_grad_tensor_holder',
'test_egr_ds_auotgrad_meta',
'test_egr_ds_accumulation_node',
'test_resnet50_with_cinn',
'test_parallel_dygraph_sync_batch_norm',
'test_monitor',
'test_mkldnn_quantizer',
'test_lookup_table_v2_bf16_op',
'test_fleet_elastic_init',
'test_fleet_elastic_collective',
'test_egr_ds_eager_tensor',
'test_dataset_download',
'test_cuda_device_name_capability',
'test_cuda_cudnn_version',
'test_collective_base',
'test_collective_api_base',
'test_backward_infer_var_data_type_shape',
'test_auto_parallel_cluster',
'test_analyzer_lac',
'test_analyzer_capi_exp_pd_config',
'test_analyzer_capi_exp_ner',
'string_helper_test',
'preprocess_local_imagenet',
'paddle_infer_api_errors_test',
'test_split_bf16_mkldnn_op',
'test_scale_bf16_mkldnn_op',
'test_ir_generate_pass',
'test_expand_v2_mkldnn_op',
'test_elementwise_sub_mkldnn_op',
]
# These tests run four at a time. If a test fails due to insufficient GPU memory or
# CUBLAS_STATUS_ALLOC_FAILED, just remove it from this list.
# CI scheduling bucket: unit-test targets run four at a time on one machine.
# Entries must exactly match the registered test (CTest) target names, since
# main() builds a '^name$' selection regex from them.
TETRAD_PARALLEL_JOB = [
    'timer_test',
    'var_type_inference_test',
    'pass_test',
    'graph_node_test',
    'test_assert',
    'test_nce',
    'buffered_allocator_test',
    'allocator_facade_frac_flags_test',
    'cuda_helper_test',
    'test_auto_growth_gpu_memory_limit',
    'device_context_test',
    'test_reference_count_pass_last_lived_ops',
    'copy_same_tensor_test',
    'test_mixed_vector',
    'op_registry_test',
    'test_prepare_op',
    'data_device_transform_test',
    'test_naive_best_fit_gpu_memory_limit',
    'test_imperative_using_non_zero_gpu',
    'retry_allocator_test',
    'system_allocator_test',
    'test_fc_lstm_fuse_pass_cc',
    'test_fc_gru_fuse_pass_cc',
    'test_conv_bn_fuse_pass_cc',
    'test_adaptive_pool2d_convert_global_pass',
    'test_fc_act_mkldnn_fuse_pass',
    'test_fleet_cc',
    'tensor_test',
    'test_repeated_fc_relu_fuse_pass_cc',
    'test_mkldnn_caching',
    'test_analyzer_seq_pool1',
    'test_analyzer_ocr',
    'test_analyzer_seq_conv1',
    'test_analyzer_mobilenet_depthwise_conv',
    'test_analyzer_pyramid_dnn',
    'test_analyzer_rnn2',
    'test_analyzer_resnet50',
    'test_analyzer_ner',
    'test_analyzer_mobilenet_transpose',
    'test_analyzer_rnn1',
    'test_analyzer_seq_pool1_profile',
    'test_analyzer_paddletensor_tensor',
    'test_analyzer_bert',
    'test_analyzer_googlenet',
    'test_fleet_base',
    'test_dgc_momentum_op',
    'test_memcpy_op',
    'test_dgc_op',
    'test_lookahead',
    'test_new_group_api',
    'test_collective_split_embedding_none_divisible',
    'test_collective_wait',
    'test_collective_split_row_linear',
    'test_collective_split_embedding',
    'float16_gpu_test',
    'test_leaky_relu_grad_grad_functor',
    'selected_rows_functor_gpu_test',
    'test_imperative_framework',
    'selected_rows_test',
    'test_conv_elementwise_add_mkldnn_fuse_pass',
    'test_cpu_quantize_pass',
    'jit_kernel_test',
    'test_conv_activation_mkldnn_fuse_pass',
    'test_trt_conv3d_op',
    'test_parallel_executor_drop_scope',
    'test_tensorrt_engine',
    'test_parallel_executor_mnist',
    'test_load_state_dict_from_old_format',
    'test_fuse_elewise_add_act_pass',
    'test_fetch_unmerged',
    'test_randint_op',
    'test_standalone_controlflow',
    'test_standalone_multiply_write',
    'test_reshape_op',
    'test_parallel_executor_fetch_isolated_var',
    'test_inplace_abn_op',
    'test_fused_transformer_encoder_layer',
    'test_eager_deletion_while_op',
    'test_dataloader_unkeep_order',
    'test_parallel_executor_profiler',
    'test_correlation',
    'test_ir_inplace_pass',
    'test_moving_average_abs_max_scale_op',
    'test_flatten_contiguous_range_op',
    'test_transforms',
    'test_sum_op',
    'test_scatter_op',
    'test_parallel_executor_pg',
    'test_mix_precision_all_reduce_fuse',
    'test_tensorrt_engine_op',
    'test_zeropad2d',
    'test_isclose_op',
    'test_weight_decay',
    'test_async_read_write',
    'test_allclose_op',
    'test_op_function_generator',
    'test_dynamic_rnn_stop_gradient',
    'test_mnist',
    'test_api_impl',
    'test_mnist_amp',
    'test_py_reader_using_executor',
    'test_mnist_pure_fp16',
    'test_py_func_op',
    'test_rmsprop_op',
    'test_jit_save_load',
    'test_asp_optimize',
    'test_tensor_zero_',
    'test_pass_builder',
    'test_read_file',
    'test_print_op',
    'test_multiprocess_dataloader_iterable_dataset_static',
    'test_pool3d_api',
    'test_imperative_trace_non_persistable_inputs',
    'test_executor_return_tensor_not_overwriting',
    'test_density_prior_box_op',
    'test_dataloader_keep_order',
    'test_bce_loss',
    'test_simnet_v2',
    'test_fetch_lod_tensor_array',
    'test_smooth_l1_loss',
    'test_matrix_rank_op',
    'test_margin_cross_entropy_op',
    'test_elementwise_pow_op',
    'test_qr_op',
    'test_dygraph_spectral_norm',
    'test_cumsum_op',
    'test_atan2_op',
    'test_tensor_fill_',
    'test_std_layer',
    'test_squeeze_op',
    'test_split_op',
    'test_sign_op',
    'test_set_value_op',
    'test_searchsorted_op',
    'test_run_program_op',
    'test_randperm_op',
    'test_randint_like',
    'test_pylayer_op',
    'test_pow2_decay_with_linear_warmup_op',
    'test_pow',
    'test_paddle_imperative_double_grad',
    'test_optimizer_for_varbase',
    'test_onnx_export',
    'test_normalize',
    'test_norm_all',
    'test_nn_sigmoid_op',
    'test_nn_margin_rank_loss',
    'test_mv_op',
    'test_multihead_attention',
    'test_mse_loss',
    'test_modelaverage',
    'test_min_op',
    'test_metrics',
    'test_merged_momentum_op',
    'test_median',
    'test_math_op_patch_var_base',
    'test_layer_norm_op_v2',
    'test_label_smooth_functional',
    'test_instance_norm_op',
    'test_imperative_recurrent_usage',
    'test_imperative_container_sequential',
    'test_imperative_container_layerlist',
    'test_imperative_container_layerdict',
    'test_group_norm_op_v2',
    'test_gelu_op',
    'test_faster_tokenizer_op',
    'test_expand_as_op',
    'test_digamma_op',
    'test_diff_op',
    'test_diagonal_op',
    'test_diagflat',
    'test_determinant_op',
    'test_deform_conv2d',
    'test_conv_transpose_nn_grad',
    'test_conj_op',
    'test_complex_reshape',
    'test_chunk_op',
    'test_bincount_op',
    'test_beam_search_decode_op',
    'test_arg_min_max_v2_op',
    'test_angle_op',
    'test_adamw_op',
    'test_adamax_api',
    'test_sparse_momentum_op',
    'test_softmax_mask_fuse_op',
    'test_paddle_save_load_binary',
    'test_ops_roi_align',
    'test_nonzero_api',
    'test_nll_loss',
    'test_neg_op',
    'test_graph_send_recv_op',
    'test_fill_constant_op',
    'test_distribution',
    'test_compiled_program',
    'test_compare_op',
    'test_bitwise_op',
    'test_bce_with_logits_loss',
    'test_adaptive_avg_pool3d',
    'test_yolo_box_op',
    'test_feed_data_check_shape_type',
    'test_asp_pruning_2d_greedy',
    'test_asp_pruning_1d',
    'test_activation_bf16_mkldnn_op',
    'test_erf_op',
    'test_trt_affine_channel_op',
    'test_reinforcement_learning',
    'test_transfer_dtype_op',
    'test_yolov3_loss_op',
    'test_where_index',
    'test_variance_layer',
    'test_unsqueeze_op',
    'test_translated_layer',
    'test_tensor_shape',
    'test_slice',
    'test_save_inference_model',
    'test_return',
    'test_print',
    'test_loop',
    'test_logical',
    'test_list',
    'test_imperative_ocr_attention_model',
    'test_ifelse',
    'test_for_enumerate',
    'test_declarative',
    'test_convert_call',
    'test_cast',
    'test_break_continue',
    'test_vjp_jvp',
    'test_unique_consecutive_op',
    'test_save_load',
    'test_partial_program',
    'test_pool2d_api',
    'test_dlpack',
    'test_complex_variable',
    'test_cuda_graph',
    'test_cuda_graph_static_mode',
    'test_custom_grad_input',
    'test_accuracy_op',
    'test_pool1d_api',
    'test_imperative_selected_rows',
    'test_tf32_cublas',
    'test_l1_loss',
    'test_adaptive_avg_pool2d',
    'test_select_input_output_op',
    'test_max_op',
    'test_variable_trans_func',
    'test_param_guard',
    'test_share_data_op',
    'test_multiply',
    'test_lambda',
    'test_prod_op',
    'test_fused_attention_op_api',
    'test_fused_bias_dropout_residual_layer_norm_op',
    'test_fused_bias_dropout_residual_layer_norm_op_api',
    'test_complex_grad_accumulated',
    'test_deg2rad',
    'test_lgamma_op',
    'test_get_tensor_from_selected_rows_op',
    'test_complex_abs',
    'test_subtract_op',
    'test_complex_elementwise_layers',
    'test_marker_op',
    'test_typing',
    'test_cuda_empty_cache',
    'test_randn_op',
    'test_maximum_op',
    'test_conv2d_api',
    'test_add_position_encoding_op',
    'test_tensor_methods',
    'test_imperative_partitial_backward',
    'test_inplace_auto_generated_apis',
    'test_cost_model',
    'test_ops_roi_pool',
    'test_real_imag_op',
    'test_view_op_reuse_allocation',
    'test_ast_util',
    'test_ones_like',
    'test_lod_array_length_op',
    'test_memory_reuse_exclude_feed_var',
    'test_ir_embedding_eltwise_layernorm_fuse_pass',
    'test_pairwise_distance',
    'test_imperative_hook_for_layer',
    'test_complex_sum_layer',
    'test_complex_cast',
    'test_complex_kron',
    'test_complex_trace_layer',
    'test_merge_selectedrows_op',
    'test_viterbi_decode_op',
    'test_square_error_cost',
    'test_lod_tensor',
    'test_array_read_write_op',
    'test_glu',
    'test_nn_dice_loss',
    'data_type_transform_test',
    'test_tracer',
    'test_elementwise_div_grad_grad',
    'tensor_util_test',
    'concat_test',
    'math_function_gpu_test',
    'malloc_test',
    'test_elementwise_add_grad_grad',
    'transform_test',
    'strided_memcpy_test',
    'test_gradient_accmulator',
    'test_fused_residual_dropout_bias',
    'test_elementwise_add_op_inplace',
    'lod_tensor_gpu_test',
    'device_event_test',
    'copy_cross_scope_test',
    'test_fused_layernorm_residual_dropout_bias',
    'test_fused_dropout_act_bias',
    'test_tensorrt',
    'test_phi_tensor',
    'test_matmul_api',
    'test_to_api',
    'beam_search_test',
    'test_tensor_to_list',
    'test_identity_op',
    'test_eigvals_op',
    'test_functional_conv1d_transpose',
    'test_Tensor_type',
    'test_analyzer_capi_exp_gpu',
    'test_ir_subgraph_python_interface',
    'test_memory_analysis',
    'test_functional_conv1d',
    'test_op_converter',
    'cost_model_test',
    'enforce_test',
    'test_cpu_quantize_squash_pass',
    'test_mkldnn_quantizer_config',
    'test_cast_dev_api',
    'test_scale_dev_api',
    'test_mean_dev_api',
    'test_sum_dev_api',
    'test_reshape_dev_api',
    'test_elementwise_dev_api',
]
# These tests run two at a time. If a test fails due to insufficient GPU memory or
# CUBLAS_STATUS_ALLOC_FAILED, just remove it from this list.
# CI scheduling bucket: unit-test targets run two at a time on one machine.
# Entries must exactly match the registered test (CTest) target names, since
# main() builds a '^name$' selection regex from them.
TWO_PARALLEL_JOB = [
    'test_callback_visualdl',
    'test_sequential',
    'test_lambv2_op',
    'test_math_op_patch',
    'test_tensor_to_numpy',
    'zero_copy_tensor_test',
    'sequence_pooling_test',
    'sequence_padding_test',
    'vol2col_test',
    'convert_model2dot_ernie',
    'im2col_test',
    'test_logical_op',
    'test_imperative_deepcf',
    'test_cholesky_op',
    'test_sample_logits_op',
    'test_ir_fc_fuse_pass',
    'test_fleet_base_single',
    'test_multiprocess_dataloader_iterable_dataset_dynamic',
    'test_fill_op',
    'test_slice_op',
    'test_cond',
    'test_ema',
    'test_ema_fleet',
    'test_nan_inf',
    'test_isinstance',
    'test_box_clip_op',
    'test_seed_op',
    'test_pool2d_int8_mkldnn_op',
    'test_adagrad_op_v2',
    'test_nn_functional_hot_op',
    'test_op_name_conflict',
    'test_imperative_gan',
    'test_amp_check_finite_and_scale_op',
    'test_random_seed',
    'test_histogram_op',
    'test_sequence_conv',
    'test_eye_op',
    'test_row_conv_op',
    'test_full_like_op',
    'test_optimizer_in_control_flow',
    'test_gru_unit_op',
    'test_distribute_fpn_proposals_op',
    'test_log_loss_op',
    'test_adadelta_op',
    'test_diag_embed',
    'test_unsqueeze2_op',
    'test_fused_fc_elementwise_layernorm_op',
    'test_sum_bf16_mkldnn_op',
    'test_sequence_erase_op',
    'test_sigmoid_cross_entropy_with_logits_op',
    'test_regularizer_api',
    'test_lrn_op',
    'test_parallel_ssa_graph_inference_feed_partial_data',
    'test_lod_reset_op',
    'test_install_check',
    'test_anchor_generator_op',
    'test_gather_nd_op',
    'test_network_with_dtype',
    'test_elementwise_sub_op',
    'test_assert_op',
    'test_elementwise_div_op',
    'test_gather_tree_op',
    'test_imperative_named_members',
    'test_seqconv_eltadd_relu_fuse_pass',
    'test_analysis_predictor',
    'test_convert_operators',
    'test_add_reader_dependency',
    'test_is_tensor',
    'test_variable',
    'test_save_model_without_var',
    'test_unfold_op',
    'test_conv_bn_fuse_pass',
    'test_truncated_gaussian_random_op',
    'test_tree_conv_op',
    'test_traced_layer_err_msg',
    'test_unique_with_counts',
    'test_auc_single_pred_op',
    'test_instance_norm_op_v2',
    'test_softmax_bf16_mkldnn_op',
    'test_mean_iou',
    'test_sequence_slice_op',
    'test_polygon_box_transform',
    'test_sequence_pad_op',
    'test_sequence_expand',
    'test_pool2d_bf16_mkldnn_op',
    'test_bilinear_api',
    'test_parallel_executor_inference_feed_partial_data',
    'test_initializer_nn',
    'test_modified_huber_loss_op',
    'test_lookup_table_op',
    'test_conv1d_layer',
    'test_kron_op',
    'test_isfinite_v2_op',
    'test_ctc_align',
    'test_decayed_adagrad_op',
    'test_dropout_op',
    'test_functional_conv3d',
    'test_flatten2_op',
    'test_fsp_op',
    'test_fusion_transpose_flatten_concat_op',
    'test_elementwise_nn_grad',
    'test_hinge_loss_op',
    'test_elementwise_add_mkldnn_op',
    'test_optimizer',
    'test_deformable_conv_op',
    'test_py_reader_push_pop',
    'test_random_crop_op',
    'test_shuffle_channel_op',
    'test_center_loss',
    'test_temporal_shift_op',
    'test_case',
    'test_transformer_api',
    'test_adagrad_op',
    'test_batch_norm_mkldnn_op',
    'test_adam_op_multi_thread',
    'test_adamax_op',
    'test_while_loop_op',
    'test_transpose_flatten_concat_fuse_pass',
    'test_trace_op',
    'test_backward',
    'test_top_k_op',
    'test_batch_fc_op',
    'test_tensor_scalar_type_promotion_static',
    'test_squared_l2_distance_op',
    'test_bicubic_interp_op',
    'test_spp_op',
    'test_space_to_depth_op',
    'test_callbacks',
    'test_sigmoid_focal_loss_op',
    'test_collect_fpn_proposals_op',
    'test_sequence_unpad_op',
    'test_conv1d_transpose_layer',
    'test_sequence_pool',
    'test_conv_elementwise_add_fuse_pass',
    'test_conv_shift_op',
    'test_sequence_expand_as',
    'test_cos_sim_op',
    'test_sequence_enumerate_op',
    'test_sequence_concat',
    'test_data_norm_op',
    'test_decoupled_py_reader_data_check',
    'test_deformable_conv_v1_op',
    'test_roi_align_op',
    'test_detach',
    'test_rnn_cells',
    'test_elementwise_floordiv_op',
    'test_elementwise_min_op',
    'test_reduce_op',
    'test_embedding_id_stop_gradient',
    'test_empty_op',
    'test_py_reader_combination',
    'test_expand_op',
    'test_prroi_pool_op',
    'test_fake_dequantize_op',
    'test_prelu_op',
    'test_fill_zeros_like_op',
    'test_pool2d_op',
    'test_gather_op',
    'test_partial_concat_op',
    'test_gaussian_random_op',
    'test_generate_proposals_v2_op',
    'test_pad_constant_like',
    'test_grid_sample_function',
    'test_pad2d_op',
    'test_huber_loss_op',
    'test_one_hot_op',
    'test_normal',
    'test_imperative_auto_prune',
    'test_nn_grad',
    'test_nearest_interp_op',
    'test_minus_op',
    'test_imperative_reinforcement',
    'test_maxout_op',
    'test_matmul_op',
    'test_increment',
    'test_masked_select_op',
    'test_lstmp_op',
    'test_label_smooth_op',
    'test_logsumexp',
    'test_log_softmax',
    'test_learning_rate_scheduler',
    'test_linspace',
    'test_linear_interp_op',
    'test_lamb_op',
    'test_lookup_table_v2_op',
    'test_l1_norm_op',
    'test_lstm_op',
    'test_margin_rank_loss_op',
    'test_index_sample_op',
    'test_imperative_save_load',
    'test_imperative_ptb_rnn_sorted_gradient',
    'test_mul_op',
    'test_imperative_lod_tensor_to_selected_rows',
    'test_imperative_data_parallel',
    'test_norm_nn_grad',
    'test_im2sequence_op',
    'test_one_hot_v2_op',
    'test_grid_sampler_op',
    'test_pad_op',
    'test_generate_proposals_op',
    'test_parameter',
    'test_gaussian_random_mkldnn_op',
    'test_partial_sum_op',
    'test_ftrl_op',
    'test_flip',
    'test_pool_max_op',
    'test_prior_box_op',
    'test_fake_quantize_op',
    'test_proximal_gd_op',
    'test_expand_v2_op',
    'test_psroi_pool_op',
    'test_expand_as_v2_op',
    'test_ptb_lm_v2',
    'test_rand_op',
    'test_empty_like_op',
    'test_rank_loss_op',
    'test_elementwise_mod_op',
    'test_elementwise_max_op',
    'test_retain_graph',
    'test_edit_distance_op',
    'test_reverse_op',
    'test_device_guard',
    'test_rnn_cells_static',
    'test_deformable_psroi_pooling',
    'test_roi_perspective_transform_op',
    'test_segment_ops',
    'test_cvm_op',
    'test_selu_op',
    'test_cross_op',
    'test_crop_tensor_op',
    'test_sequence_mask',
    'test_conv_elementwise_add2_act_fuse_pass',
    'test_sequence_reshape',
    'test_conv2d_fusion_op',
    'test_sequence_softmax_op',
    'test_compare_reduce_op',
    'test_clip_by_norm_op',
    'test_box_coder_op',
    'test_smooth_l1_loss_op',
    'test_bilinear_interp_op',
    'test_spectral_norm_op',
    'test_sum_mkldnn_op',
    'test_batch_norm_op',
    'test_base_layer',
    'test_argsort_op',
    'test_arg_min_max_op',
    'test_transpose_op',
    'test_affine_grid_op',
    'test_unpool_op',
    'test_addmm_op',
    'test_adam_optimizer_fp32_fp64',
    'test_auc_op',
    'test_adam_op',
    'test_bilinear_tensor_product_op',
    'test_transpose_mkldnn_op',
    'test_cast_op',
    'test_scatter_nd_op',
    'test_conv2d_transpose_op_depthwise_conv',
    'test_queue',
    'test_cross_entropy_op',
    'test_detection',
    'test_elementwise_mul_mkldnn_op',
    'test_grid_generator',
    'test_functional_conv2d',
    'test_fit_a_line',
    'test_fill_any_like_op',
    'test_functional_conv2d_transpose',
    'test_functional_conv3d_transpose',
    'test_dot_op',
    'test_device',
    'test_imperative_layer_apply',
    'test_dataloader_early_reset',
    'test_imperative_selected_rows_to_lod_tensor',
    'test_crop_op',
    'test_linear_interp_v2_op',
    'test_lr_scheduler',
    'test_tensor_array_to_tensor',
    'test_mean_op',
    'test_momentum_op',
    'test_iou_similarity_op',
    'test_optimizer_grad',
    'test_dygraph_weight_norm',
    'test_batch_norm_op_v2',
    'test_pool2d_mkldnn_op',
    'test_regularizer',
    'test_sequence_reverse',
    'test_shape_op',
    'test_diag',
    'test_strided_slice_op',
    'test_switch_case',
    'test_target_assign_op',
    'test_isfinite_op',
    'test_conv_elementwise_add_act_fuse_pass',
    'test_unbind_op',
    'test_size_op',
    'test_unique',
    'test_unstack_op',
    'test_wrappers',
    'test_deprecated_decorator',
    'test_affine_channel_op',
    'test_arange',
    'test_lrn_mkldnn_op',
    'test_imperative_gnn',
    'test_dequantize_abs_max_op',
    'test_elementwise_mul_op',
    'test_tensor_scalar_type_promotion_dynamic',
    'test_fc_op',
    'test_mish_op',
    'test_flatten_op',
    'test_gradient_clip',
    'test_allclose_layer',
    'test_meshgrid_op',
    'test_get_places_op',
    'test_reader_reset',
    'test_squared_l2_norm_op',
    'test_softmax_mkldnn_op',
    'test_numel_op',
    'test_squeeze2_op',
    'test_dygraph_mnist_fp16',
    'test_activation_mkldnn_op',
    'test_imperative_layer_children',
    'test_nearest_interp_v2_op',
    'test_fill_zeros_like2_op',
    'test_sync_batch_norm_op',
    'test_static_save_load',
    'test_coalesce_tensor_op',
    'test_fuse_bn_act_pass',
    'test_shard_index_op',
    'test_cuda_random_seed',
    'test_dequantize_log_op',
    'test_mkldnn_batch_norm_act_fuse_pass',
    'test_imperative_skip_op',
    'test_proximal_adagrad_op',
    'test_conv2d_transpose_mkldnn_op',
    'test_imperative_optimizer',
    'test_assign_value_op',
    'test_roi_pool_op',
    'test_manual_seed',
    'test_range',
    'test_box_decoder_and_assign_op',
    'test_imperative_optimizer_v2',
    'test_python_operator_overriding',
    'test_is_empty_op',
    'test_py_reader_pin_memory',
    'test_train_recognize_digits',
    'test_parallel_executor_feed_persistable_var',
    'test_update_loss_scaling_op',
    'test_rnn_cell_api',
    'test_imperative_load_static_param',
    'test_fuse_bn_add_act_pass',
    'test_quantize_transpiler_v2',
    'paddle_infer_api_test',
    'test_analyzer_ernie',
    'lite_resnet50_test',
    'lite_mul_model_test',
    'test_complex_simplenet',
    'test_imperative_layers',
    'test_trt_convert_concat',
    'test_trt_convert_affine_channel',
    'test_multi_precision_fp16_train',
    'test_trt_transpose_flatten_concat_fuse_pass',
    'test_trt_tuned_dynamic_shape',
    'test_quantization_pass',
    'test_trt_fc_fuse_pass',
    'test_var_base',
    'trt_split_converter_test',
    'test_user_defined_quantization',
    'test_quantization_scale_pass',
    'feed_forward_test',
    'test_fuse_optimizer_pass',
    'test_standalone_executor',
    'test_imperative_qat_user_defined',
    'test_mkldnn_fc_act_fuse_pass',
    'test_cross_entropy_loss',
    'test_signal',
    'test_fused_feedforward_op',
    'test_weight_decay_extend',
    'test_fuse_relu_depthwise_conv_pass',
    'test_diag_v2',
    'test_tensordot',
    'test_rnn_decode_api',
    'test_activation_op',
    'test_clip_op',
    'test_imperative_ptb_rnn',
    'test_trt_convert_group_norm',
    'test_scale_op',
    'test_tensor_fill_diagonal_',
    'test_tensor_type_promotion',
    'test_fill_any_op',
    'test_trt_yolo_box_op',
    'test_tensor_register_hook',
    'test_fused_multihead_matmul_op',
    'test_uniform_random_inplace_op',
    'test_decoupled_py_reader',
    'test_assign_op',
    'test_trt_instance_norm_op',
    'test_uniform_random_op',
    'test_eager_deletion_delete_vars',
    'test_bernoulli_op',
    'test_multinomial_op',
    'test_fused_elemwise_activation_op',
    'test_profiler',
    'test_ir_memory_optimize_pass',
    'test_callback_reduce_lr_on_plateau',
    'test_parallel_executor_dry_run',
    'test_paddle_save_load',
    'test_stack_op',
    'test_overlap_add_op',
    'test_frame_op',
    'test_broadcast_tensors_op',
    'test_pad3d_op',
    'test_cumprod_op',
    'trt_fc_prelu_test',
    'test_sigmoid_focal_loss',
    'test_pixel_shuffle',
    'test_nn_matmul_v2_grad',
    'test_multi_dot_op',
    'test_imperative_thread_local_has_grad',
    'test_bmm_op',
    'test_activation_nn_grad',
    'test_sgd_op',
    'test_mul_nn_grad',
    'test_inplace',
    'test_einsum',
    'test_cosine_similarity_api',
    'test_seq2seq',
    'test_word2vec',
    'test_scale_mkldnn_op',
    'test_asp_pruning_2d_best',
    'test_complex_getitem',
    'test_vhp',
    'test_top_k_v2_op',
    'test_hessian',
    'test_concat_mkldnn_op',
    'test_reduce_mkldnn_op',
    'test_jacobian',
    'test_tril_triu_op',
    'test_tile_op',
    'test_where_op',
    'test_trunc_op',
    'test_trt_dynamic_shape',
    'test_split_mkldnn_op',
    'test_simnet',
    'test_program_translator',
    'test_prelu_mkldnn_op',
    'test_op_attr',
    'test_grad',
    'test_full_name_usage',
    'test_error',
    'test_elementwise_gradient_op',
    'test_dict',
    'test_cache_program',
    'test_len',
    'test_adaptive_max_pool1d',
    'test_imperative_layer_trainable',
    'test_rad2deg',
    'test_normalization_wrapper',
    'test_affine_grid_function',
    'cc_imp_py_test',
    'test_grad_clip_minimize',
    'test_executor_and_mul',
    'test_tensor',
    'test_imperative_container_parameterlist',
    'test_adaptive_max_pool2d',
    'test_imperative_triple_grad',
    'test_zeros_like_op',
    'test_nn_functional_embedding_dygraph',
    'test_function_hook',
    'test_minimum_op',
    'test_nn_quant_functional_layers',
    'test_adaptive_max_pool3d',
    'test_fetch_feed',
    'test_sort_op',
    'test_complex_transpose',
    'test_imperative_parallel_coalesce_split',
    'test_weight_normalization',
    'test_adaptive_avg_pool1d',
    'test_egr_task_fwd_bwd_joint',
    'test_analyzer_transformer',
    'test_analyzer_text_classification',
    'test_analyzer_small_dam',
    'test_analyzer_int8_mobilenetv2',
    'test_analyzer_int8_mobilenetv1',
    'test_analyzer_int8_googlenet',
    'test_analyzer_bfloat16_resnet50',
    'test_analyzer_bfloat16_mobilenetv2',
    'test_analyzer_bfloat16_mobilenetv1',
    'test_analyzer_quant_performance_benchmark',
    'test_analyzer_int8_resnet50',
    'test_analyzer_int8_mobilenet_ssd',
    'test_analyzer_bfloat16_googlenet',
    'test_analyzer_transformer_profile',
    'test_mkldnn_softplus_activation_fuse_pass',
    'test_custom_relu_op_jit',
    'test_custom_relu_model',
    'test_custom_attrs_jit',
    'test_custom_relu_op_setup',
    'test_mkldnn_matmul_v2_transpose_reshape_fuse_pass',
    'workqueue_test',
    'job',
    'test_kernel_factory',
    'test_trt_convert_slice',
    'test_framework_tensor_utils',
    'test_flatten_api',
    'test_split_plugin',
    'test_linear_chain_crf_op',
    'test_callback_early_stop',
    'test_inplace_and_clear_gradient',
    'test_reset_grad_inplace_version',
    'test_initializer',
    'test_egr_ds_grad_node_info',
    'test_save_inference_model_conditional_op',
    'test_parallel_executor_run_load_infer_program',
    'test_hapi_hub_model',
    'test_get_inputs_outputs_in_block',
    'test_get_device_properties',
    'test_fleet_elastic_manager',
    'test_fleet_ascend_utils',
    'test_executor_check_fetch_list',
    'test_eig_op',
    'test_egr_performance_benchmark_fluid_cpu',
    'test_egr_performance_benchmark_eager_cpu',
    'test_datasets',
    'test_dataset_wmt',
    'test_dataset_movielens',
    'test_dataset_consistency_inspection',
    'test_dataset_cifar',
    'test_cyclic_cifar_dataset',
    'test_cuda_device_count',
    'test_auto_parallel_graph',
    'test_auto_parallel_completion_gpt',
    'test_auto_parallel_completion',
    'test_analyzer_transformer_fuse',
    'test_analyzer_save_model',
    'test_analyzer_lexical_gru_int8_multi_gru',
    'test_analyzer_lexical_gru_int8',
    'test_analyzer_lexical_gru_bfloat16',
    'test_analyzer_lexical_gru',
    'test_analyzer_detect_functional_mkldnn',
    'test_analyzer_capi_exp_pd_tensor',
    'test_analyzer_capi_exp_int',
    'test_analyzer_capi_exp',
    'preprocess_local_pascalvoc',
    'test_flatten_mkldnn_op',
    'test_transfer_layout_op',
    'test_squeeze2_mkldnn_op',
    'test_conv2d_transpose_bf16_mkldnn_op',
    'test_slice_mkldnn_op',
    'test_parallel_executor_seresnext_base_cpu',
    'test_stack_mkldnn_op',
    'test_softplus_mkldnn_op',
    'test_parallel_executor_seresnext_with_reduce_cpu',
    'test_nearest_interp_v2_mkldnn_op',
    'test_fusion_lstm_mkldnn_op',
    'test_fuse_resnet_unit',
    'test_elementwise_div_mkldnn_op',
    'test_uniform_random_bf16_op',
    'test_reshape_mkldnn_op',
    'test_reduce_bf16_mkldnn_op',
    'test_parallel_executor_seresnext_with_fuse_all_reduce_cpu',
    'test_nearest_interp_mkldnn_op',
    'test_ir_graph_to_program_pass',
    'test_fusion_lstm_int8_mkldnn_op',
    'test_fusion_lstm_bf16_mkldnn_op',
    'test_convert_call_generator',
    'test_container',
    'test_clip_mkldnn_op',
    'test_cast_mkldnn_op',
    'test_bilinear_interp_v2_mkldnn_op',
    'test_bilinear_interp_mkldnn_op',
    'test_asp_utils',
    'test_tensor_fill_diagonal_tensor',
    'test_tsm',
    'test_imperative_transformer_sorted_gradient',
    'test_apply_pass_to_program',
    'test_transformer',
    'test_trt_convert_multihead_matmul',
    'trt_cascade_rcnn_test',
    'test_trt_group_norm_op',
    'test_trt_shuffle_channel_detect_pass',
    'test_trt_conv3d_transpose_op',
    'test_trt_flatten_op',
    'test_trt_convert_depthwise_conv2d',
    'test_trt_convert_conv2d',
    'test_trt_convert_conv2d_fusion',
    'test_trt_conv_pass',
    'test_trt_reduce_mean_op',
    'test_trt_scale_op',
    'test_trt_reshape_op',
    'test_trt_reduce_sum_op',
    'test_trt_gather_op',
    'trt_dynamic_shape_test',
    'test_trt_dynamic_shape_ernie_ser_deser',
    'test_mixed_precision',
    'test_basic_api_transformation',
    'test_image_classification_fp16',
    'test_vision_models',
    'test_trt_elementwise_op',
    'test_inplace_softmax_with_cross_entropy',
    'test_tensor_fill_diagonal_tensor_',
    'test_norm_op',
    'test_trt_pad_op',
    'test_concat_op',
    'test_model',
    'test_trt_convert_deformable_conv',
    'test_fused_attention_op',
    'test_mobile_net',
    'test_lstm',
    'test_rnn_nets_static',
    'trt_resnet50_test',
    'test_resnet_pure_fp16',
    'test_class_center_sample_op',
    'test_bert',
    'test_simple_rnn_op',
    'trt_resnext_test',
    'test_parallel_executor_fix_op_run_order',
    'test_imperative_double_grad',
    'test_cycle_gan',
    'test_pretrained_model',
    'test_trt_convert_instance_norm',
    'test_hapi_amp',
    'test_trt_convert_reduce_mean',
    'test_trt_convert_prelu',
    'test_trt_convert_yolo_box',
    'test_trt_convert_reduce_sum',
    'test_trt_convert_gelu',
    'test_trt_convert_reshape',
    'test_graph',
    'test_trt_convert_split',
    'test_bmn',
    'test_trt_convert_matmul',
    'test_trt_convert_nearest_interp',
    'test_trt_convert_transpose',
    'test_emb_eltwise_layernorm_fuse_pass',
    'test_trt_convert_pad',
    'test_trt_convert_gather_nd',
    'test_trt_convert_tile',
    'test_trt_convert_nearest_interp_v2',
    'test_trt_convert_layer_norm',
    'test_trt_convert_anchor_generator',
    'test_trt_convert_clip',
    'test_trt_convert_batch_norm',
    'test_cuda_stream_event',
]
def main():
high_parallel_job = '^job$'
secondary_high_parallel_job = '^job$'
third_high_parallel_job = '^job$'
fourth_high_parallel_job = '^job$'
fifth_high_parallel_job = '^job$'
sixth_high_parallel_job = '^job$'
lowest_high_parallel_job = '^job$'
non_parallel_job = '^job$'
# sys.argv[1] may exceed max_arg_length when busybox run parallel_UT_rule in windows
BUILD_DIR = os.getcwd()
file_path = os.path.join(BUILD_DIR, 'all_ut_list')
with open(file_path, 'r') as f:
test_cases = f.read()
test_cases = test_cases.split("\n")
if platform.system() == 'Windows':
high_parallel_job_list = CPU_PARALLEL_JOB
fourth_high_parallel_job_list = TETRAD_PARALLEL_JOB
fifth_high_parallel_job_list = TWO_PARALLEL_JOB
else:
high_parallel_job_list = HIGH_PARALLEL_JOB_NEW
fourth_high_parallel_job_list = FOURTH_HIGH_PARALLEL_JOB_NEW
fifth_high_parallel_job_list = FIFTH_PARALLEL_JOB_NEW
for unittest in high_parallel_job_list:
if unittest in test_cases:
high_parallel_job = high_parallel_job + '|^' + unittest + '$'
test_cases.remove(unittest)
if platform.system() != 'Windows':
for unittest in SECONDARY_HIGH_PARALLEL_JOB_NEW:
if unittest in test_cases:
secondary_high_parallel_job = (
secondary_high_parallel_job + '|^' + unittest + '$'
)
test_cases.remove(unittest)
for unittest in THIRD_HIGH_PARALLEL_JOB_NEW:
if unittest in test_cases:
third_high_parallel_job = (
third_high_parallel_job + '|^' + unittest + '$'
)
test_cases.remove(unittest)
for unittest in SIXTH_PARALLEL_JOB_NEW:
if unittest in test_cases:
sixth_high_parallel_job = (
sixth_high_parallel_job + '|^' + unittest + '$'
)
test_cases.remove(unittest)
for unittest in LOWEST_PARALLEL_JOB_NEW:
if unittest in test_cases:
lowest_high_parallel_job = (
lowest_high_parallel_job + '|^' + unittest + '$'
)
test_cases.remove(unittest)
for unittest in fourth_high_parallel_job_list:
if unittest in test_cases:
fourth_high_parallel_job = (
fourth_high_parallel_job + '|^' + unittest + '$'
)
test_cases.remove(unittest)
for unittest in fifth_high_parallel_job_list:
if unittest in test_cases:
fifth_high_parallel_job = (
fifth_high_parallel_job + '|^' + unittest + '$'
)
test_cases.remove(unittest)
for unittest in test_cases:
non_parallel_job = non_parallel_job + '|^' + unittest + '$'
if platform.system() == 'Windows':
print(
"{};{};{};{}".format(
high_parallel_job,
fourth_high_parallel_job,
fifth_high_parallel_job,
non_parallel_job,
)
)
else:
print(
"{};{};{};{};{};{};{};{}".format(
high_parallel_job,
secondary_high_parallel_job,
third_high_parallel_job,
fourth_high_parallel_job,
fifth_high_parallel_job,
sixth_high_parallel_job,
lowest_high_parallel_job,
non_parallel_job,
)
)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | heavengate.noreply@github.com |
2a8c8c68da8aa31cf3e069ae3d86603d00d5ec27 | 9bb7bc13aad5d822f52b0f52e31a468faa964f22 | /lcdb/helpers.py | 18cd06a08d7c1a5bf783781640739cf536a2516e | [
"MIT"
] | permissive | lcdb/lcdb-workflows | 03e9f8a5d887ac23059304d98f8abafe83644708 | ee28a42bc6021b8b82f1950144cda6e841823661 | refs/heads/master | 2021-01-21T15:07:25.121526 | 2019-04-12T21:30:51 | 2019-04-12T21:30:51 | 58,685,798 | 1 | 1 | null | 2016-08-01T13:52:35 | 2016-05-13T00:11:01 | Python | UTF-8 | Python | false | false | 2,947 | py | import os
import pandas
import yaml
from jsonschema import validate, ValidationError
from snakemake.shell import shell
def validate_config(config, schema):
schema = yaml.load(open(schema))
cfg = yaml.load(open(config))
try:
validate(cfg, schema)
except ValidationError as e:
msg = '\nPlease fix %s: %s\n' % (config, e.message)
raise ValidationError(msg)
def build_wrapper_for(source_dir, wrappers_dir):
"""
Returns a `wrapper_for` function to be used in a workflow.
Parameters
----------
:source_dir: str
Directory of the calling snakemake workflow. Typically this is obtained
with the srcdir() built-in.
:wrappers_dir: str
Directory of wrappers relative to source dir
"""
def wrapper_for(tool):
return os.path.join(source_dir, wrappers_dir, tool)
return wrapper_for
def build_params_for(config):
"""
Returns a `params_for` function to be used in a workflow.
Parameters
----------
:config: dict
The global config dictionary from a workflow
"""
def params_for(rule, key):
return config.get('rules', {}).get(rule, {}).get('params', {}).get(key, '')
return params_for
def build_threads_for(config):
"""
Returns a `threads_for` function to be used in a workflow.
Parameters
----------
:config: dict
The global config dictionary from a workflow
"""
def threads_for(rule):
return config.get('rules', {}).get(rule, {}).get('threads', None)
return threads_for
def workflow_helper_functions(config, source_dir, wrappers_dir):
"""
One-stop-shop for building helper functions.
Parameters
----------
:config: dict
The global config dictionary from a workflow
:source_dir: str
Directory of the calling snakemake workflow. Typically this is obtained
with the srcdir() built-in.
:wrappers_dir: str
Directory of wrappers relative to source dir
Returns
-------
wrappers_for, params_for, and threads_for functions.
"""
return (
build_wrapper_for(source_dir, wrappers_dir),
build_params_for(config),
build_threads_for(config),
)
def load_sampletable(filename):
"""
Load sampletable.
TODO: validation will go here.
"""
return pandas.read_table(filename, index_col=0)
def rscript(string, scriptname, log=None):
"""
Saves the string as `scriptname` and then runs it
Parameters
----------
string : str
Filled-in template to be written as R script
scriptname : str
File to save script to
log : str
File to redirect stdout and stderr to. If None, no redirection occurs.
"""
with open(scriptname, 'w') as fout:
fout.write(string)
if log:
_log = '> {0} 2>&1'.format(log)
else:
_log = ""
shell('Rscript {scriptname} {_log}')
| [
"dalerr@niddk.nih.gov"
] | dalerr@niddk.nih.gov |
8ea00d6cb58f877c3542b77e7c1d3bd1ffa1d98e | 40eea049f7e9cef38f30da90b9dd38f840eab240 | /nvij-cliprm-ec362ad713de/cliprm/backend/crawlers/shaze/shaze/spiders/shaze_spider.py | de0f74731fa3f8eaacb3ca2c07c6359ec3237ec2 | [] | no_license | pratikpoddar/clipr | ddf9a6e6ca9e50e84cd7dcc36ae732876e019da6 | a84a88d8a6eb76836b0bef4b6531919a563b1345 | refs/heads/master | 2021-05-27T21:53:03.844876 | 2014-07-18T10:52:41 | 2014-07-18T10:52:41 | 21,977,252 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | import sys
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from shaze.items import CliprItem
visitedIds = set()
def isProduct(url):
return url.find('/p/') >0
def getId(url):
return int((url.split('/p/')[1]).split('/')[0])
class DmozSpider(CrawlSpider):
name = "shaze"
allowed_domains = ["shaze.in"]
start_urls = [
"http://www.shaze.in"
]
rules = (
# Extract links matching '/p/' for products and '/c/' for categories.
Rule(SgmlLinkExtractor(allow=('/p/','/c/')), callback='parse_item',follow=True),
)
def parse_item(self, response):
return parser(response)
def parser(response):
link = response.url
if (not isProduct(link)):
return []
else:
prodId = getId(link)
items = []
if prodId not in visitedIds:
visitedIds.add(prodId)
site = HtmlXPathSelector(response)
item = CliprItem()
item['link'] = response.url
item['siteId'] = 'shaze'
category = site.select('//div[@class="breadcrumb"]/a/text()').extract()
category = filter(lambda x:x!="", category)
category = filter(lambda x:x.find("You are in Shop")<0, category)
if len(category) <= 1:
print "Error: breadcrumb not found"
sys.exit()
item['title'] = category[len(category) - 1 ]
item['category'] = category[:len(category)-1]
item['price'] = trimPrice(site.select('//span[@class="productPrice"]/text()').extract()[0])
item['markprice'] = item['price']
item['image'] = site.select('//div[@class="picture"]/img/@src').extract()[0]
item['description'] = site.select('//div[@class="overview"]/h2[@class="productnameOverview"]/text()').extract()[0]
item['buylink'] = item['link']
item['recid'] = ""
items.append(item)
return items
def trimPrice(price):
return int(price.replace("Rs",'').replace(".00",'').replace(".",'').replace("/",'').replace(',','').strip())
| [
"pratik.phodu@gmail.com"
] | pratik.phodu@gmail.com |
58df61c0ef8b4a460d646f62c0d3370de9ea7366 | 22a1166f2aea25c0a76d3c38114fbfe0c9ac867d | /core/update.py | 25e5862496ed240c9e66893c225099bae66e24a8 | [
"MIT"
] | permissive | z0x010/maltrail | 536ea417a2c4688c997c36ace87e9709d04b0b6f | 3a62bdb11aefb9abc224ea073e8699046b0af076 | refs/heads/master | 2021-01-19T11:52:46.605788 | 2017-02-16T10:30:45 | 2017-02-16T10:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,657 | py | #!/usr/bin/env python
"""
Copyright (c) 2014-2017 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import csv
import glob
import inspect
import os
import re
import sqlite3
import subprocess
import sys
import time
import urllib
import urlparse
sys.dont_write_bytecode = True
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) # to enable calling from current directory too
from core.addr import addr_to_int
from core.common import bogon_ip
from core.common import cdn_ip
from core.common import check_whitelisted
from core.common import load_trails
from core.common import retrieve_content
from core.settings import config
from core.settings import read_whitelist
from core.settings import BAD_TRAIL_PREFIXES
from core.settings import FRESH_IPCAT_DELTA_DAYS
from core.settings import LOW_PRIORITY_INFO_KEYWORDS
from core.settings import HIGH_PRIORITY_INFO_KEYWORDS
from core.settings import HIGH_PRIORITY_REFERENCES
from core.settings import IPCAT_CSV_FILE
from core.settings import IPCAT_SQLITE_FILE
from core.settings import IPCAT_URL
from core.settings import PROXIES
from core.settings import ROOT_DIR
from core.settings import TRAILS_FILE
from core.settings import USERS_DIR
def _chown(filepath):
if not subprocess.mswindows and os.path.exists(filepath):
try:
os.chown(filepath, int(os.environ.get("SUDO_UID", -1)), int(os.environ.get("SUDO_GID", -1)))
except Exception, ex:
print "[!] chown problem with '%s' ('%s')" % (filepath, ex)
def _fopen(filepath, mode="rb"):
retval = open(filepath, mode)
if "w+" in mode:
_chown(filepath)
return retval
def update_trails(server=None, force=False, offline=False):
"""
Update trails from feeds
"""
trails = {}
duplicates = {}
if server:
print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
content = retrieve_content(server)
if not content:
exit("[!] unable to retrieve data from '%s'" % server)
else:
with _fopen(TRAILS_FILE, "w+b") as f:
f.write(content)
trails = load_trails()
trail_files = set()
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, "trails"))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if config.CUSTOM_TRAILS_DIR:
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, os.path.expanduser(config.CUSTOM_TRAILS_DIR)))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
try:
if not os.path.isdir(USERS_DIR):
os.makedirs(USERS_DIR, 0755)
except Exception, ex:
exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
_chown(USERS_DIR)
if not trails and (force or not os.path.isfile(TRAILS_FILE) or (time.time() - os.stat(TRAILS_FILE).st_mtime) >= config.UPDATE_PERIOD or os.stat(TRAILS_FILE).st_size == 0 or any(os.stat(_).st_mtime > os.stat(TRAILS_FILE).st_mtime for _ in trail_files)):
print "[i] updating trails (this might take a while)..."
if not offline and (force or config.USE_FEED_UPDATES):
sys.path.append(os.path.abspath(os.path.join(ROOT_DIR, "trails", "feeds")))
filenames = sorted(glob.glob(os.path.join(sys.path[-1], "*.py")))
else:
filenames = []
sys.path.append(os.path.abspath(os.path.join(ROOT_DIR, "trails")))
filenames += [os.path.join(sys.path[-1], "static")]
filenames += [os.path.join(sys.path[-1], "custom")]
filenames = [_ for _ in filenames if "__init__.py" not in _]
for i in xrange(len(filenames)):
filename = filenames[i]
try:
module = __import__(os.path.basename(filename).split(".py")[0])
except (ImportError, SyntaxError), ex:
print "[x] something went wrong during import of feed file '%s' ('%s')" % (filename, ex)
continue
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "fetch":
print(" [o] '%s'%s" % (module.__url__, " " * 20 if len(module.__url__) < 20 else ""))
sys.stdout.write("[?] progress: %d/%d (%d%%)\r" % (i, len(filenames), i * 100 / len(filenames)))
sys.stdout.flush()
try:
results = function()
for item in results.items():
if item[0].startswith("www.") and '/' not in item[0]:
item = [item[0][len("www."):], item[1]]
if item[0] in trails:
if item[0] not in duplicates:
duplicates[item[0]] = set((trails[item[0]][1],))
duplicates[item[0]].add(item[1][1])
if not (item[0] in trails and (any(_ in item[1][0] for _ in LOW_PRIORITY_INFO_KEYWORDS) or trails[item[0]][1] in HIGH_PRIORITY_REFERENCES)) or (item[1][1] in HIGH_PRIORITY_REFERENCES and "history" not in item[1][0]) or any(_ in item[1][0] for _ in HIGH_PRIORITY_INFO_KEYWORDS):
trails[item[0]] = item[1]
if not results and "abuse.ch" not in module.__url__:
print "[x] something went wrong during remote data retrieval ('%s')" % module.__url__
except Exception, ex:
print "[x] something went wrong during processing of feed file '%s' ('%s')" % (filename, ex)
# custom trails from remote location
if config.CUSTOM_TRAILS_URL:
print(" [o] '(remote custom)'%s" % (" " * 20))
content = retrieve_content(config.CUSTOM_TRAILS_URL)
if not content:
exit("[!] unable to retrieve data (or empty response) from '%s'" % config.CUSTOM_TRAILS_URL)
else:
url = config.CUSTOM_TRAILS_URL
url = ("http://%s" % url) if not "//" in url else url
__info__ = "(remote custom)"
__reference__ = urlparse.urlsplit(url).netloc
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#'):
continue
line = re.sub(r"\s*#.*", "", line)
if '://' in line:
line = re.search(r"://(.*)", line).group(1)
line = line.rstrip('/')
if '/' in line:
trails[line] = (__info__, __reference__)
line = line.split('/')[0]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", line):
trails[line] = (__info__, __reference__)
else:
trails[line.strip('.')] = (__info__, __reference__)
# basic cleanup
for key in trails.keys():
if key not in trails:
continue
if not key or re.search(r"\A(?i)\.?[a-z]+\Z", key) and not any(_ in trails[key][1] for _ in ("custom", "static")):
del trails[key]
continue
if re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key):
if any(_ in trails[key][0] for _ in ("parking site", "sinkhole")) and key in duplicates:
del duplicates[key]
if trails[key][0] == "malware":
trails[key] = ("potential malware site", trails[key][1])
if trails[key][0] == "ransomware":
trails[key] = ("ransomware (malware)", trails[key][1])
if key.startswith("www.") and '/' not in key:
_ = trails[key]
del trails[key]
key = key[len("www."):]
if key:
trails[key] = _
if '?' in key:
_ = trails[key]
del trails[key]
key = key.split('?')[0]
if key:
trails[key] = _
if '//' in key:
_ = trails[key]
del trails[key]
key = key.replace('//', '/')
trails[key] = _
if key != key.lower():
_ = trails[key]
del trails[key]
key = key.lower()
trails[key] = _
if key in duplicates:
_ = trails[key]
others = sorted(duplicates[key] - set((_[1],)))
if others and " (+" not in _[1]:
trails[key] = (_[0], "%s (+%s)" % (_[1], ','.join(others)))
read_whitelist()
for key in trails.keys():
if check_whitelisted(key) or any(key.startswith(_) for _ in BAD_TRAIL_PREFIXES):
del trails[key]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key) and (bogon_ip(key) or cdn_ip(key)):
del trails[key]
else:
try:
key.decode("utf8")
trails[key][0].decode("utf8")
trails[key][1].decode("utf8")
except UnicodeDecodeError:
del trails[key]
try:
if trails:
with _fopen(TRAILS_FILE, "w+b") as f:
writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)
for trail in trails:
writer.writerow((trail, trails[trail][0], trails[trail][1]))
except Exception, ex:
print "[x] something went wrong during trails file write '%s' ('%s')" % (TRAILS_FILE, ex)
print "[i] update finished%s" % (40 * " ")
return trails
def update_ipcat(force=False):
try:
if not os.path.isdir(USERS_DIR):
os.makedirs(USERS_DIR, 0755)
except Exception, ex:
exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
_chown(USERS_DIR)
if force or not os.path.isfile(IPCAT_CSV_FILE) or not os.path.isfile(IPCAT_SQLITE_FILE) or (time.time() - os.stat(IPCAT_CSV_FILE).st_mtime) >= FRESH_IPCAT_DELTA_DAYS * 24 * 3600 or os.stat(IPCAT_SQLITE_FILE).st_size == 0:
print "[i] updating ipcat database..."
try:
if PROXIES:
urllib.URLopener(PROXIES).urlretrieve(IPCAT_URL, IPCAT_CSV_FILE)
else:
urllib.urlretrieve(IPCAT_URL, IPCAT_CSV_FILE)
except Exception, ex:
print "[x] something went wrong during retrieval of '%s' ('%s')" % (IPCAT_URL, ex)
else:
try:
if os.path.exists(IPCAT_SQLITE_FILE):
os.remove(IPCAT_SQLITE_FILE)
with sqlite3.connect(IPCAT_SQLITE_FILE, isolation_level=None, check_same_thread=False) as con:
cur = con.cursor()
cur.execute("BEGIN TRANSACTION")
cur.execute("CREATE TABLE ranges (start_int INT, end_int INT, name TEXT)")
with open(IPCAT_CSV_FILE) as f:
for row in f:
if not row.startswith('#') and not row.startswith('start'):
row = row.strip().split(",")
cur.execute("INSERT INTO ranges VALUES (?, ?, ?)", (addr_to_int(row[0]), addr_to_int(row[1]), row[2]))
cur.execute("COMMIT")
cur.close()
con.commit()
except Exception, ex:
print "[x] something went wrong during ipcat database update ('%s')" % ex
_chown(IPCAT_CSV_FILE)
_chown(IPCAT_SQLITE_FILE)
def main():
try:
update_trails(force=True)
update_ipcat()
except KeyboardInterrupt:
print "\r[x] Ctrl-C pressed"
else:
if "-r" in sys.argv:
results = []
with _fopen(TRAILS_FILE) as f:
for line in f:
if line and line[0].isdigit():
items = line.split(',', 2)
if re.search(r"\A[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\Z", items[0]):
ip = items[0]
reputation = 1
lists = items[-1]
if '+' in lists:
reputation = 2 + lists.count(',')
if "(custom)" in lists:
reputation -= 1
if "(static)" in lists:
reputation -= 1
reputation -= max(0, lists.count("prox") + lists.count("maxmind") + lists.count("spys.ru") + lists.count("rosinstrument") - 1) # remove duplicate proxy hits
reputation -= max(0, lists.count("blutmagie") + lists.count("torproject") - 1) # remove duplicate tor hits
if reputation > 0:
results.append((ip, reputation))
results = sorted(results, key=lambda _: _[1], reverse=True)
for result in results:
sys.stderr.write("%s\t%s\n" % (result[0], result[1]))
sys.stderr.flush()
if __name__ == "__main__":
main()
| [
"miroslav.stampar@gmail.com"
] | miroslav.stampar@gmail.com |
075137d16c4ea177b032134a2f40c97cd6d7c5ce | 438e546e2acf5aa57c34c6481e477f7025b12e21 | /Grokking Coding Interview/P6 - In Place Reversal of LL /Reverse K Sized Sub-List.py | 2dbe7f39ba36d836a47c76fe3b67fd7d1e61a5e3 | [] | no_license | SajinKowserSK/algorithms-practice | 988537ef3537487cb40c78776dd2c9e1130cde4f | 41bbd55553747492a539b41f6e86bff5504c5842 | refs/heads/master | 2022-11-06T18:22:41.329484 | 2022-10-19T23:40:10 | 2022-10-19T23:40:10 | 206,470,288 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | from __future__ import print_function
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def print_list(self):
temp = self
while temp is not None:
print(temp.value, end=" ")
temp = temp.next
print()
def reverse_every_k_elements(head, k):
trav = 1
prev = head
curr = head
lastTail = None
while curr.next:
curr = curr.next
trav += 1
if trav % k == 0:
rest = curr.next
curr.next = None
# important to note that after reversing, "prev" actually becomes the tail
# the head/start is now reversed_head
reversed_head = reverse(prev)
if lastTail is not None:
lastTail.next = reversed_head
lastTail = prev
else:
lastTail = prev
start = reversed_head
prev = rest
curr = rest
trav = 1
lastTail.next = reverse(prev)
return start
def reverse(head):
prev = None
curr = head
while curr:
nextN = curr.next
curr.next = prev
prev = curr
curr = nextN
return prev
def main():
head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
head.next.next.next = Node(4)
head.next.next.next.next = Node(5)
head.next.next.next.next.next = Node(6)
head.next.next.next.next.next.next = Node(7)
head.next.next.next.next.next.next.next = Node(8)
print("Nodes of original LinkedList are: ", end='')
head.print_list()
result = reverse_every_k_elements(head, 3)
print("Nodes of reversed LinkedList are: ", end='')
result.print_list()
main()
| [
"sajinkowser@gmail.com"
] | sajinkowser@gmail.com |
ab45a62f3cdff764191fa10661d1c3a0d52c4b51 | e129fe32194ad8d15f664cd055062d01caae370f | /tools/betterbib-format | c9fd4371c24e59f8a6cbfba8255c52059f2ba91c | [
"MIT"
] | permissive | tbabej/betterbib | dd3b6895d3fd8ff4cf50b4b8e5fdcd2fb6d31216 | 80a3c9040232d9988f9a1e4c40724b40b9b9ed85 | refs/heads/master | 2020-03-11T10:45:14.243594 | 2018-04-17T18:43:30 | 2018-04-18T13:37:11 | 129,950,668 | 0 | 0 | MIT | 2018-04-17T18:49:03 | 2018-04-17T18:49:02 | null | UTF-8 | Python | false | false | 3,331 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
from __future__ import print_function, unicode_literals
import argparse
import collections
import sys
from pybtex.database.input import bibtex
import betterbib
def _main():
args = _parse_cmd_arguments()
data = bibtex.Parser().parse_file(args.infile)
# Use an ordered dictionary to make sure that the entries are written out
# sorted by their BibTeX key if demanded.
od = betterbib.decode(collections.OrderedDict(
sorted(data.entries.items())
if args.sort_by_bibkey
else data.entries.items()
))
od = _adapt_doi_urls(od, args.doi_url_type)
betterbib.write(
od, args.outfile, args.delimeter_type, tab_indent=args.tabs_indent
)
return
def _adapt_doi_urls(od, doi_url_type):
if doi_url_type == 'new':
od = _update_doi_url(od, lambda doi: 'https://doi.org/' + doi)
elif doi_url_type == 'short':
def update_to_short_doi(doi):
short_doi = betterbib.tools.get_short_doi(doi)
if short_doi:
return 'https://doi.org/' + short_doi
return None
od = _update_doi_url(od, update_to_short_doi)
else:
assert doi_url_type == 'unchanged'
return od
def _update_doi_url(od, url_from_doi):
for bib_id in od:
if 'url' in od[bib_id].fields:
doi = betterbib.tools.doi_from_url(od[bib_id].fields['url'])
if doi:
new_url = url_from_doi(doi)
if new_url:
od[bib_id].fields['url'] = new_url
return od
def _parse_cmd_arguments():
parser = argparse.ArgumentParser(description='Reformat BibTeX files.')
parser.add_argument(
'-v', '--version',
help='display version information',
action='version',
version='betterbib {}, Python {}'.format(
betterbib.__version__, sys.version
)
)
parser.add_argument(
'infile',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
help='input BibTeX file (default: stdin)'
)
parser.add_argument(
'outfile',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout,
help='output BibTeX file (default: stdout)'
)
parser.add_argument(
'-b', '--sort-by-bibkey',
action='store_true',
help='sort entries by BibTeX key (default: false)'
)
parser.add_argument(
'-t', '--tabs-indent',
action='store_true',
help='use tabs for indentation (default: false)'
)
parser.add_argument(
'-d', '--delimeter-type',
choices=[
'braces',
'quotes',
],
default='braces',
help=(
'which delimeters to use in the output file '
'(default: braces {...})'
),
)
parser.add_argument(
'-u', '--doi-url-type',
choices=[
'unchanged',
'new',
'short'
],
default='new',
help=(
'DOI URL (new: https://doi.org/<DOI> (default), '
'short: https://doi.org/abcde)'
),
)
return parser.parse_args()
if __name__ == '__main__':
_main()
| [
"nico.schloemer@gmail.com"
] | nico.schloemer@gmail.com | |
f30497be13679db52d80358c553ceabc0ae00929 | 17f1811abda6c828460b77f460671f9c2f464204 | /leetcode/shuffle_an_array.py | 2bcc987387591d1047a0a4164e7da89c024cb1c8 | [] | no_license | rishabhranawat/challenge | f10f69fc30881a0571c4321b466a89aeeb06e568 | e836343be5185f8843bb77197fccff250e9a77e3 | refs/heads/master | 2021-01-21T15:13:47.590675 | 2020-04-25T15:26:42 | 2020-04-25T15:26:42 | 91,833,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | # Checkout Fisher Yates
import random
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
"""
self.nums = nums
self.pairs = {}
for i in range(0, len(nums), 1):
self.pairs[i] = nums[i]
def reset(self):
"""
Resets the array to its original configuration and return it.
:rtype: List[int]
"""
for index, value in self.pairs.items():
self.nums[index] = value
return self.nums
def shuffle(self):
"""
Returns a random shuffling of the array.
:rtype: List[int]
"""
temp = self.pairs
new = {}
current = set()
counter = 0
while(len(current) != len(self.nums)):
index = random.randint(0, len(self.nums)-1)
val = self.pairs[index]
if(val not in current):
current.add(val)
self.nums[counter] = val
counter += 1
return self.nums
# Your Solution object will be instantiated and called as such:
nums = [1, 2, 3]
obj = Solution(nums)
param_1 = obj.reset()
print(param_1)
param_2 = obj.shuffle()
print(param_2)
param_1 = obj.reset()
print(param_1) | [
"rishabhranawat12345@gmail.com"
] | rishabhranawat12345@gmail.com |
812d42b9f9e83081cc0bd88c2d1b6b5dcec3a3ab | 85e078ee3ceda5091624233ca19ba42f78747499 | /LeetCode/buy_sell_stock2.py | f9fc5f75d3de157ae7c519c73459053fbf61088c | [] | no_license | papayetoo/StudyinPython | d5e6ec0cff0e97fcc4afc8d846e3658c06eb67c2 | f686b6e08720ad4d7d57b41d24c63c4bfa64dd90 | refs/heads/master | 2021-07-22T04:05:38.993123 | 2021-02-03T14:12:26 | 2021-02-03T14:12:26 | 240,009,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | class Solution:
def buySellStock(self, prices: [int]) -> int:
if len(prices) == 0:
return 0
profit = 0
pair_prices = [(index, value) for index, value in enumerate(prices)]
descending = sorted(pair_prices,
key=lambda x: x[1],
reverse=True)
ascending = sorted(pair_prices,
key=lambda x: x[1])
for d in descending:
tmp = 0
for a in ascending:
if d[0] >= a[0]:
continue
else:
if tmp < a[1] - d[1]:
tmp = a[1] - d[1]
print(tmp)
profit += tmp
return profit
if __name__ == '__main__':
prices = [1, 2, 3, 4, 5]
s = Solution()
print(s.buySellStock(prices))
| [
"rhkdgus0826@gmail.com"
] | rhkdgus0826@gmail.com |
742cf9975339908a3a686a400bc4f2e1c2447a7a | e845f7f61ff76b3c0b8f4d8fd98f6192e48d542a | /djangocg/contrib/gis/geometry/test_data.py | 1ae4fe529d1427c026130348b99afec6eac57b91 | [
"BSD-3-Clause"
] | permissive | timothyclemans/djangocg | fd150c028013cb5f53f5a3b4fdc960a07fdaaa78 | 52cf28e046523bceb5d436f8e6bf61e7d4ba6312 | refs/heads/master | 2021-01-18T13:20:13.636812 | 2012-08-31T23:38:14 | 2012-08-31T23:38:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,000 | py | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import gzip
import json
import os
from djangocg.contrib import gis
from djangocg.utils import six
# This global used to store reference geometry data.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(gis.__file__), 'tests', 'data')
def tuplize(seq):
"Turn all nested sequences to tuples in given sequence."
if isinstance(seq, (list, tuple)):
return tuple([tuplize(i) for i in seq])
return seq
def strconvert(d):
"Converts all keys in dictionary to str type."
return dict([(str(k), v) for k, v in six.iteritems(d)])
def get_ds_file(name, ext):
return os.path.join(TEST_DATA,
name,
name + '.%s' % ext
)
class TestObj(object):
"""
Base testing object, turns keyword args into attributes.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class TestDS(TestObj):
"""
Object for testing GDAL data sources.
"""
def __init__(self, name, **kwargs):
# Shapefile is default extension, unless specified otherwise.
ext = kwargs.pop('ext', 'shp')
self.ds = get_ds_file(name, ext)
super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
"""
Testing object used for wrapping reference geometry data
in GEOS/GDAL tests.
"""
def __init__(self, **kwargs):
# Converting lists to tuples of certain keyword args
# so coordinate test cases will match (JSON has no
# concept of tuple).
coords = kwargs.pop('coords', None)
if coords:
self.coords = tuplize(coords)
centroid = kwargs.pop('centroid', None)
if centroid:
self.centroid = tuple(centroid)
ext_ring_cs = kwargs.pop('ext_ring_cs', None)
if ext_ring_cs:
ext_ring_cs = tuplize(ext_ring_cs)
self.ext_ring_cs = ext_ring_cs
super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
"""
Each attribute of this object is a list of `TestGeom` instances.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, [TestGeom(**strconvert(kw)) for kw in value])
class TestDataMixin(object):
"""
Mixin used for GEOS/GDAL test cases that defines a `geometries`
property, which returns and/or loads the reference geometry data.
"""
@property
def geometries(self):
global GEOMETRIES
if GEOMETRIES is None:
# Load up the test geometry data from fixture into global.
gzf = gzip.GzipFile(os.path.join(TEST_DATA, 'geometries.json.gz'))
geometries = json.loads(gzf.read())
GEOMETRIES = TestGeomSet(**strconvert(geometries))
return GEOMETRIES
| [
"timothy.clemans@gmail.com"
] | timothy.clemans@gmail.com |
0038fd3149b3996e6eb7ac75db588001245eb691 | 2ea1fdf72317649c698105be8d84935c55007db0 | /npr_sfs/methods/ibme.py | ad09d7f2eb3c64720ad17bf007bc6edc5ebe989d | [
"MIT"
] | permissive | joepfortunato/NPR-SFS | 25987b5eda4203473059dda1cabdbbb68ecbbf29 | 15d9fd2b83d75214fa851aafcc17f970252dad32 | refs/heads/master | 2021-05-30T02:48:56.252831 | 2015-10-06T07:41:53 | 2015-10-06T07:41:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,904 | py | # -*- coding: utf-8 -*-
## @package npr_sfs.methods.ibme
#
# Image-Based Material Editing [Kahn et al. 2006].
# @author tody
# @date 2015/07/30
"""Usage: ibme.py [<input>] [-h] [-o] [-q]
<input> Input image.
-h --help Show this help.
-o --output Save output files. [default: False]
-q --quiet No GUI. [default: False]
"""
from docopt import docopt
import numpy as np
import cv2
import matplotlib.pyplot as plt
from npr_sfs.datasets.loader import dataFile
from npr_sfs.io_util.image import loadRGBA, saveNormal
from npr_sfs.cv.image import luminance, alpha
from npr_sfs.plot.window import showMaximize
from npr_sfs.cv.normal import normalizeImage, normalToColor
from npr_sfs.util.logger import getLogger
logger = getLogger(__name__)
def computeGradientNormals(D_32F, sigma=5.0):
h, w = D_32F.shape
gx = cv2.Sobel(D_32F, cv2.CV_64F, 1, 0, ksize=1)
gx = cv2.GaussianBlur(gx, (0, 0), sigma)
gy = cv2.Sobel(D_32F, cv2.CV_64F, 0, 1, ksize=1)
gy = cv2.GaussianBlur(gy, (0, 0), sigma)
T_32F = np.zeros((h, w, 3), dtype=np.float32)
T_32F[:, :, 0] = 1.0
T_32F[:, :, 2] = gx
B_32F = np.zeros((h, w, 3), dtype=np.float32)
B_32F[:, :, 1] = 1.0
B_32F[:, :, 2] = -gy
T_flat = T_32F.reshape(-1, 3)
B_flat = B_32F.reshape(-1, 3)
N_flat = np.cross(T_flat, B_flat)
N_32F = N_flat.reshape(h, w, 3)
N_32F = normalizeImage(N_32F)
return N_32F
def depthRecovery(I_32F, sigma_range=0.1, sigma_space=10,
w_base=0.9, w_detail=0.1):
BL = cv2.bilateralFilter(I_32F, -1, sigma_range, sigma_space)
DL = I_32F - BL
D_32F = w_base * BL + w_detail * DL
return D_32F
def estimateNormal(I_32F):
D_32F = depthRecovery(I_32F)
N_32F = computeGradientNormals(D_32F)
return N_32F, D_32F
def showResult(C_8U, D_32F, N_32F, A_8U):
logger.info("showResult")
plt.subplot(131)
plt.title('Original Color')
plt.imshow(C_8U)
plt.subplot(132)
plt.title('Depth')
plt.imshow(D_32F, cmap=plt.cm.gray)
plt.subplot(133)
plt.title('Estimated Normal')
plt.imshow(normalToColor(N_32F, A_8U))
showMaximize()
def saveResult(input_file, A_8U, N_32F):
    """Save the estimated normal image next to the input as '<name>_N.png'."""
    logger.info("saveResult")
    # NOTE(review): str.replace substitutes every ".png" occurrence in the
    # path, not just the extension — assumed safe for the dataset's names.
    N_file = input_file.replace(".png", "_N.png")
    saveNormal(N_file, N_32F, A_8U)
def main(input_file, output_file, quiet):
    """Run the IBME pipeline: load RGBA, estimate normals, save/show results."""
    C_8U = loadRGBA(input_file)
    A_8U = alpha(C_8U)
    I_32F = luminance(C_8U)
    N_32F, D_32F = estimateNormal(I_32F)
    if output_file:
        saveResult(input_file, A_8U, N_32F)
    if quiet:
        # Headless mode: skip the GUI entirely.
        return
    showResult(C_8U, D_32F, N_32F, A_8U)
if __name__ == '__main__':
    # Parse command-line options from the module docstring (docopt usage).
    args = docopt(__doc__)
    if args['<input>']:
        input_file = args['<input>']
    else:
        # Fall back to the bundled "ThreeBox" sample when no input is given.
        input_file = dataFile("ThreeBox")
    output_file = args['--output']
    quiet = args['--quiet']
main(input_file, output_file, quiet) | [
"tody411@gmail.com"
] | tody411@gmail.com |
454f3d60787fdac730e072981d6438e2503218bf | aef0a344e13f6a10f7145e8cd63a514adaa2f5a7 | /tb/irq_rate_limit/test_irq_rate_limit.py | ec8e60e9a36e56a4fd41e43d2460a8fb901e93d5 | [
"MIT"
] | permissive | alexforencich/verilog-pcie | a0ff59662e2d9cac100295b43a9b4ad374bcd406 | 75126f133318b31f226ae13ebc46a40eb52cf3ac | refs/heads/master | 2023-07-20T01:19:06.004282 | 2023-06-24T05:38:06 | 2023-06-24T05:38:06 | 164,569,208 | 765 | 223 | MIT | 2023-07-18T08:36:17 | 2019-01-08T05:28:51 | Verilog | UTF-8 | Python | false | false | 5,159 | py | #!/usr/bin/env python
"""
Copyright (c) 2022 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, Timer
from cocotb.regression import TestFactory
from cocotbext.axi.stream import define_stream
IrqBus, IrqTransaction, IrqSource, IrqSink, IrqMonitor = define_stream("Irq",
signals=["index", "valid", "ready"]
)
class TB(object):
    """cocotb testbench wrapper for the irq_rate_limit DUT."""

    def __init__(self, dut):
        self.dut = dut
        self.log = logging.getLogger("cocotb.tb")
        self.log.setLevel(logging.DEBUG)
        # 4 ns clock (250 MHz) driven for the whole test.
        cocotb.start_soon(Clock(dut.clk, 4, units="ns").start())
        # Stream driver/monitor for the input and output IRQ interfaces.
        self.irq_source = IrqSource(IrqBus.from_prefix(dut, "in_irq"), dut.clk, dut.rst)
        self.irq_sink = IrqSink(IrqBus.from_prefix(dut, "out_irq"), dut.clk, dut.rst)
        # Rate limiting disabled until a test configures it.
        dut.prescale.setimmediatevalue(0)
        dut.min_interval.setimmediatevalue(0)

    def set_idle_generator(self, generator=None):
        """Optionally insert idle cycles on the IRQ source."""
        if generator:
            self.irq_source.set_pause_generator(generator())

    def set_backpressure_generator(self, generator=None):
        """Optionally apply backpressure on the IRQ sink."""
        if generator:
            self.irq_sink.set_pause_generator(generator())

    async def cycle_reset(self):
        """Pulse the reset line for two clock cycles."""
        self.dut.rst.setimmediatevalue(0)
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 1
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
        self.dut.rst.value = 0
        await RisingEdge(self.dut.clk)
        await RisingEdge(self.dut.clk)
async def run_test_irq(dut, idle_inserter=None, backpressure_inserter=None):
    """Exercise the IRQ rate limiter.

    Configures prescale=249 / min_interval=100 (a 100 us minimum interval
    assuming the 4 ns clock), then checks single-shot pass-through and
    coalescing of repeated interrupts within the interval.
    """
    tb = TB(dut)

    await tb.cycle_reset()

    tb.set_idle_generator(idle_inserter)
    tb.set_backpressure_generator(backpressure_inserter)

    dut.prescale.setimmediatevalue(249)
    dut.min_interval.setimmediatevalue(100)

    tb.log.info("Test interrupts (single shot)")

    # One interrupt per index: each should pass straight through, once.
    for k in range(8):
        await tb.irq_source.send(IrqTransaction(index=k))

    for k in range(8):
        irq = await tb.irq_sink.recv()
        tb.log.info(irq)
        assert irq.index == k

    assert tb.irq_sink.empty()

    # No retriggering after the interval when nothing else was sent.
    await Timer(110, 'us')

    assert tb.irq_sink.empty()

    tb.log.info("Test interrupts (multiple)")

    # Five rapid bursts per index: the first fire passes immediately, the
    # repeats must be coalesced and released only after the interval.
    for n in range(5):
        for k in range(8):
            await tb.irq_source.send(IrqTransaction(index=k))

    for k in range(8):
        irq = await tb.irq_sink.recv()
        tb.log.info(irq)
        assert irq.index == k

    assert tb.irq_sink.empty()

    # Just before the interval expires: still held back.
    await Timer(99, 'us')

    assert tb.irq_sink.empty()

    # Just after: the coalesced interrupts are released (one per index).
    await Timer(11, 'us')

    assert not tb.irq_sink.empty()

    for k in range(8):
        irq = await tb.irq_sink.recv()
        tb.log.info(irq)
        assert irq.index == k

    assert tb.irq_sink.empty()

    # The repeats were coalesced, so nothing further is emitted.
    await Timer(110, 'us')

    assert tb.irq_sink.empty()

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
def cycle_pause():
    """Endless pause pattern: stall for three cycles, then run one."""
    pattern = (1, 1, 1, 0)
    return itertools.cycle(pattern)
if cocotb.SIM_NAME:
    # Running under a cocotb simulator: register run_test_irq for every
    # combination of idle/backpressure pause generators.
    for test in [
            run_test_irq
    ]:
        factory = TestFactory(test)
        factory.add_option("idle_inserter", [None, cycle_pause])
        factory.add_option("backpressure_inserter", [None, cycle_pause])
        factory.generate_tests()
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
def test_irq_rate_limit(request):
    """pytest entry point: build and simulate the irq_rate_limit RTL via cocotb-test."""
    dut = "irq_rate_limit"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut

    verilog_sources = [
        os.path.join(rtl_dir, f"{dut}.v"),
    ]

    # DUT parameters forwarded to the simulator and to the testbench
    # environment as PARAM_* variables.
    parameters = {}

    parameters['IRQ_INDEX_WIDTH'] = 11

    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # Unique build directory per parametrized test (brackets sanitized).
    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
bdc6890d12b3d567090dc600624a8c446f355672 | cbafab192b5072b8e9150dcada8013503af40fca | /Django_Learning/admin/app01/migrations/0001_initial.py | 7eeda195e315bb1b00276f9051dfd29b6e1748aa | [] | no_license | lafitehhq/PythonProject | 928421b49ff0ea9fd536ca7769a04fe990848929 | d5d0352541a29ee070884263e7eb50160cd7b3b5 | refs/heads/master | 2021-09-06T01:48:42.971720 | 2018-02-01T11:56:50 | 2018-02-01T11:56:50 | 106,712,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,267 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-14 06:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for app01.

    Creates Author, AuthorDetail (one-to-one with Author), Book,
    Book2Author (explicit many-to-many through table), and Publisher,
    then wires Book to Publisher via a foreign key.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='AuthorDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sex', models.BooleanField(choices=[(0, '男'), (1, '女')], max_length=1)),
                ('email', models.EmailField(max_length=254)),
                ('address', models.CharField(max_length=50)),
                ('birthday', models.DateField()),
                ('author', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='app01.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('price', models.DecimalField(decimal_places=2, default=10, max_digits=5)),
                ('publication_date', models.DateField()),
                ('authors', models.ManyToManyField(to='app01.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Book2Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Author')),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Book')),
            ],
        ),
        migrations.CreateModel(
            name='Publisher',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='名称')),
                ('address', models.CharField(max_length=50, verbose_name='地址')),
                ('city', models.CharField(max_length=60, verbose_name='城市')),
                ('state_province', models.CharField(max_length=30)),
                ('country', models.CharField(max_length=50)),
                ('website', models.URLField()),
            ],
            options={
                'verbose_name_plural': '出版商',
                'verbose_name': '出版商',
            },
        ),
        migrations.AddField(
            model_name='book',
            name='publisher',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Publisher'),
        ),
        migrations.AlterUniqueTogether(
            name='book2author',
            unique_together=set([('author', 'book')]),
        ),
    ]
| [
"lafitehhq@126.com"
] | lafitehhq@126.com |
90b561a17cc041f5c24dc06b96de9a60de196e92 | dff5c14ce2ce94b1170c4e31b985bc23c25c72a6 | /CLASS 3/2606: 바이러스/solution.py | 88c05d9a1e3256315bd812da4654ae4a23ac2806 | [
"MIT"
] | permissive | coco-in-bluemoon/baekjoon-online-judge | 371c6afb66467d2afd28bc315afc5109fa3bd8cc | 06e14fe89e4ec5b940f2afa20bc5e4b0de08c8f6 | refs/heads/main | 2023-02-15T11:51:38.631843 | 2021-01-08T15:33:17 | 2021-01-08T15:33:17 | 302,237,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | from collections import deque
def solution(num_node, edges):
    """Count the computers reachable from computer 1 (excluding 1 itself).

    Classic BFS over an undirected graph built from *edges*; nodes are
    numbered 1..num_node.
    """
    start = 1
    adjacency = {v: set() for v in range(1, num_node + 1)}
    for a, b in edges:
        adjacency[a].add(b)
        adjacency[b].add(a)

    seen = set()
    pending = deque([start])
    while pending:
        current = pending.popleft()
        if current in seen:
            continue
        seen.add(current)
        pending.extend(v for v in adjacency[current] if v not in seen)

    # Every visited node except the start itself is "infected".
    return len(seen) - 1
if __name__ == "__main__":
    # Input format (BOJ 2606): node count, edge count, then one edge per line.
    num_node = int(input())
    num_edge = int(input())
    edges = list()
    for _ in range(num_edge):
        src, dst = map(int, input().split())
        edges.append([src, dst])
    answer = solution(num_node, edges)
    print(answer)
| [
"coco.in.bluemoon@gmail.com"
] | coco.in.bluemoon@gmail.com |
70feee5ad2d1a2ce6a9b66799514a767ef8dce50 | c5758c1f4c880f4530df1a5ffb4c30ee2da445ee | /pytracking/vot_ep/sk3x3/vot_wrapper_sk3x3_ep0031.py | 5753454ed67494b9ff345fda9c48e1174ac294a5 | [] | no_license | bfjei2825401/d3s | 6d662fc301181a0e3ad831b0db6111e3cf8f4097 | 32140a3c67252f0e98cbfbf6ad6d2a79267c221b | refs/heads/master | 2023-02-27T09:57:25.692878 | 2021-01-27T14:20:57 | 2021-01-27T14:20:57 | 297,217,521 | 0 | 0 | null | 2020-09-21T03:23:09 | 2020-09-21T03:23:09 | null | UTF-8 | Python | false | false | 2,459 | py | import pytracking.vot as vot
import sys
import cv2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pytracking.tracker.segm_sk3x3 import SegmSK3x3
from pytracking.parameter.segm_sk3x3 import default_params_ep as vot_params
def rect_to_poly(rect):
    """Convert [x, y, w, h] into a 4-corner polygon [x0, y0, ..., x3, y3].

    Corners are ordered top-left, top-right, bottom-right, bottom-left.
    """
    left, top = rect[0], rect[1]
    right = rect[0] + rect[2]
    bottom = rect[1] + rect[3]
    return [left, top, right, top, right, bottom, left, bottom]
def parse_sequence_name(image_path):
    """Return (directory name just before '/color/', index of '/color/')."""
    idx = image_path.find('/color/')
    prefix = image_path[:idx]
    # Distance from the end of the prefix back to the previous '/'.
    name_len = prefix[::-1].find('/')
    return prefix[idx - name_len:], idx
def parse_frame_name(image_path, idx):
    """Return the frame file name (without extension) after '/color/' at *idx*."""
    tail = image_path[idx + 7:]  # len('/color/') == 7
    dot = tail.find('.')
    return tail[:dot]
# MAIN
# VOT integration loop: initialize the tracker on the first frame's
# ground-truth polygon, then report one polygon per subsequent frame.
handle = vot.VOT("polygon")
selection = handle.region()
imagefile = handle.frame()
if not imagefile:
    sys.exit(0)
# Epoch-31 checkpoint of the SK3x3 segmentation tracker.
params = vot_params.parameters(31)
gt_rect = [round(selection.points[0].x, 2), round(selection.points[0].y, 2),
           round(selection.points[1].x, 2), round(selection.points[1].y, 2),
           round(selection.points[2].x, 2), round(selection.points[2].y, 2),
           round(selection.points[3].x, 2), round(selection.points[3].y, 2)]
# OpenCV loads BGR; the tracker expects RGB.
image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
sequence_name, idx_ = parse_sequence_name(imagefile)
frame_name = parse_frame_name(imagefile, idx_)
params.masks_save_path = ''
params.save_mask = False
tracker = SegmSK3x3(params)
# tell the sequence name to the tracker (to save segmentation masks to the disk)
tracker.sequence_name = sequence_name
tracker.frame_name = frame_name
tracker.initialize(image, gt_rect)
while True:
    imagefile = handle.frame()
    if not imagefile:
        break
    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)
    # tell the frame name to the tracker (to save segmentation masks to the disk)
    frame_name = parse_frame_name(imagefile, idx_)
    tracker.frame_name = frame_name
    prediction = tracker.track(image)
    # Axis-aligned rectangle predictions are converted to 4-corner polygons.
    if len(prediction) == 4:
        prediction = rect_to_poly(prediction)
    pred_poly = vot.Polygon([vot.Point(prediction[0], prediction[1]),
                             vot.Point(prediction[2], prediction[3]),
                             vot.Point(prediction[4], prediction[5]),
                             vot.Point(prediction[6], prediction[7])])
    handle.report(pred_poly)
| [
"752958525@qq.com"
] | 752958525@qq.com |
3acba025f2c13a9f0caf50de16baee79e95de19e | 18ad97292b34a679b8dea8a85090541c5bbf6174 | /averageseasy.py | 97cf78596df80adb1ddda5916d7075d7163cfa81 | [] | no_license | Jyotirm0y/kattis | b941044e39dc36d169450480fc33fd33bd2e0f8e | 2b9c1819ba29419bbea3db2e8ad7851155abbb3a | refs/heads/master | 2023-05-31T21:11:38.350044 | 2021-06-12T08:21:47 | 2021-06-12T08:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | t = int(input())
for _ in range(t):
    # Blank separator line between test cases.
    input()
    ncs, ne = map(int, input().split())
    iqcs = list(map(int, input().split()))
    iqe = list(map(int, input().split()))
    sumiqcs = sum(iqcs)
    sumiqe = sum(iqe)
    # Count CS students whose IQ is above the economics average
    # (iq*ne > sumiqe) and below the CS average (iq*ncs < sumiqcs);
    # cross-multiplied to stay in integer arithmetic.
    print(sum([1 if iq*ne > sumiqe and iq*ncs < sumiqcs else 0 for iq in iqcs]))
| [
"ainunnajib@gmail.com"
] | ainunnajib@gmail.com |
7cd61cc5a2265dd40f86d8fb7e1a9c2e8cd16a39 | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /B/BuddyStrings.py | fe5f524ac1581380e3ab3d95645abc58736301d5 | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | '''
Given two strings A and B of lowercase letters, return true if you can swap two letters in
A so the result is equal to B, otherwise, return false.
Swapping letters is defined as taking two indices i and j (0-indexed) such that i != j and
swapping the characters at A[i] and A[j]. For example, swapping at indices 0 and 2 in
"abcd" results in "cbad".
Example 1:
Input: A = "ab", B = "ba"
Output: true
Explanation: You can swap A[0] = 'a' and A[1] = 'b' to get "ba", which is equal to B.
Example 2:
Input: A = "ab", B = "ab"
Output: false
Explanation: The only letters you can swap are A[0] = 'a' and A[1] = 'b', which results
in "ba" != B.
Example 3:
Input: A = "aa", B = "aa"
Output: true
Explanation: You can swap A[0] = 'a' and A[1] = 'a' to get "aa", which is equal to B.
Example 4:
Input: A = "aaaaaaabc", B = "aaaaaaacb"
Output: true
Example 5:
Input: A = "", B = "aa"
Output: false
Constraints:
0 <= A.length <= 20000
0 <= B.length <= 20000
A and B consist of lowercase letters.
'''
class Solution(object):
    def buddyStrings(self, A, B):
        """
        :type A: str
        :type B: str
        :rtype: bool

        True iff swapping exactly one pair of positions in A yields B.
        """
        if len(A) != len(B):
            return False
        if A == B:
            # A swap must exchange two equal letters, so A needs a duplicate.
            return len(set(A)) < len(A)
        mismatches = [(a, b) for a, b in zip(A, B) if a != b]
        # Exactly two mismatched positions, and they must mirror each other.
        return len(mismatches) == 2 and mismatches[0] == mismatches[1][::-1]
if __name__ == "__main__":
print(Solution().buddyStrings("ab", "ba")) | [
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
df4409470fe736ddda0aa22479628205853deac1 | ef09e86b16f741d0f262f330fc205e493b9d9041 | /polls/migrations/0001_initial.py | d6c669eba075ac5f2bcb3e33a82f245077a8f69f | [] | no_license | sarthakbhooshan/my_first_django_app | 6813d100a90dbe556732406a5d32691c7578b9c5 | b07a50a44fb8126fedfad874d81f0cb5f287a9c1 | refs/heads/master | 2021-01-14T08:38:45.499995 | 2016-07-18T11:18:33 | 2016-07-18T11:18:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-12 05:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated migration for the polls app.

    Creates Question and Choice, then links each Choice to its Question
    with a cascading foreign key.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
267b81539f58299286c9d273a7fa0e636ada96e9 | b0bd3342c244ebf30ae5ab29daa078f2b39010f7 | /utils.py | a035fb3e56880250c7e067c38377ce533c431ec5 | [] | no_license | naiqili/itime_learning | 30a8af7f1234277162ccdd4c69cd9f9a4a7ab412 | d9b191bb32a7e49cb99443d7dccea5bb392aee90 | refs/heads/master | 2021-06-19T04:54:06.239320 | 2017-06-26T13:35:39 | 2017-06-26T13:35:39 | 92,792,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,946 | py | import csv
import operator
import os
import numpy as np
import tensorflow as tf
from os import path
DATA_DIR_MOVIELENS_100K = 'data/ml-100k'
HOME_DIR = path.expanduser('~')
PROJECTS_DIR = path.join(HOME_DIR, 'Projects')
RANKSYS_DIR = path.join(PROJECTS_DIR, 'RankSys')
RANKSYS_EXAMPLES_DIR = path.join(RANKSYS_DIR, 'RankSys-examples')
NUM_FOLD = 5
NUM_GENRE = 18
THRESHOLD_ITEM = 50
THRESHOLD_USER = 20
VALID_PROPORTION = 0.05
NP_INT_DTYPE = np.int32
NP_FLOAT_DTYPE = np.float32
TF_INT_DTYPE = tf.int32
TF_FLOAT_DTYPE = tf.float32
class Dataset(object):
    """Cross-validated recommendation dataset loaded from CSV files.

    Expects *data_dir* to contain users.csv, items.csv, and per-fold
    train{k}.csv / test{k}.csv / valid{k}.csv triple files.
    """

    def __init__(self, data_dir, num_fold=NUM_FOLD):
        user_filepath = path.join(data_dir, 'users.csv')
        item_filepath = path.join(data_dir, 'items.csv')
        self.num_fold = num_fold
        # One line per user/item in the index files.
        num_user = count_num_line(user_filepath)
        num_item = count_num_line(item_filepath)
        print('#users={}\t#items={}'.format(num_user, num_item))
        self.num_user, self.num_item = num_user, num_item
        datasets = []
        for fold in range(num_fold):
            train_filepath = path.join(data_dir, 'train{0:1d}.csv'.format(fold))
            train_data = load_triple(train_filepath)
            test_filepath = path.join(data_dir, 'test{0:1d}.csv'.format(fold))
            test_data = load_triple(test_filepath)
            valid_filepath = path.join(data_dir, 'valid{0:1d}.csv'.format(fold))
            valid_data = load_triple(valid_filepath)
            dataset = train_data, test_data, valid_data
            datasets.append(dataset)
        self.datasets = datasets

    def get_dataset(self, fold):
        """Return the (train, test, valid) triple for the given fold."""
        return self.datasets[fold]
def get_cv_index(data_size, num_fold):
    """Partition range(data_size) into num_fold contiguous (start, end) spans.

    The first (data_size % num_fold) folds receive one extra element so the
    sizes differ by at most one.
    """
    base, extra = divmod(data_size, num_fold)
    spans = []
    start = 0
    for fold in range(num_fold):
        end = start + base + (1 if fold < extra else 0)
        spans.append((start, end))
        start = end
    return spans
def count_num_line(filepath):
    """Return the number of lines in the text file at *filepath*.

    Iterates the file lazily instead of calling readlines(), which
    materialized the entire file in memory just to count its lines.
    """
    with open(filepath, 'r') as fin:
        return sum(1 for _ in fin)
def load_triple(filepath):
    """Load a whitespace-separated (int, int, float) triple file.

    Each line holds "alpha beta gamma" (e.g. user id, item id, rating).
    Returns three parallel numpy arrays with the module's NP_INT_DTYPE /
    NP_FLOAT_DTYPE dtypes.
    """
    alp_elems, bet_elems, gam_elems = [], [], []
    with open(filepath, 'r') as f:
        for line in f.readlines():
            tokens = line.split()
            alp_elem = int(tokens[0])
            bet_elem = int(tokens[1])
            gam_elem = float(tokens[2])
            alp_elems.append(alp_elem)
            bet_elems.append(bet_elem)
            gam_elems.append(gam_elem)
    alp_elems = np.asarray(alp_elems, dtype=NP_INT_DTYPE)
    bet_elems = np.asarray(bet_elems, dtype=NP_INT_DTYPE)
    gam_elems = np.asarray(gam_elems, dtype=NP_FLOAT_DTYPE)
    dataset = alp_elems, bet_elems, gam_elems
    return dataset
def np_build_indices(row_index, col_index):
    """Pair row and column index vectors into an (n, 2) index array."""
    rows = row_index[:, None]
    cols = col_index[:, None]
    return np.concatenate((rows, cols), axis=1)
def tf_build_indices(row_index, col_index):
    """TensorFlow counterpart of np_build_indices: stack two 1-D index
    tensors column-wise into an (n, 2) tensor."""
    return tf.concat([tf.expand_dims(row_index, 1), tf.expand_dims(col_index, 1)], 1)
if __name__ == '__main__':
    # disable all debugging logs
    # (TensorFlow C++ log level 3 = errors only)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
| [
"naiqil@student.unimelb.edu.au"
] | naiqil@student.unimelb.edu.au |
b6cd96dc4083b3617cc8be9fe9c662bdcef2d60f | 37eef4cd7e0e17086fb5cd3e0dd710b43470786a | /tests/commands/test__vi_cc.py | 0f3c95eca2e8808a7538606bfb81c41e3e96a738 | [
"MIT"
] | permissive | DylanBruzenak/Vintageous | 9ffd480aeea0e5c127fec7c9eafb8b5d3acf85c7 | 022faaf22acd72d3514c74013217b7661bf10a37 | refs/heads/master | 2021-01-15T11:34:53.529087 | 2013-08-17T21:02:07 | 2013-08-17T21:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | import unittest
from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE
from Vintageous.tests.commands import set_text
from Vintageous.tests.commands import add_selection
from Vintageous.tests.commands import get_sel
from Vintageous.tests.commands import first_sel
from Vintageous.tests.commands import BufferTest
class Test_vi_cc_InModeInternalNormal(BufferTest):
    """Tests for the Vi 'cc' command (change whole line) in internal-normal mode."""

    def testSelectsWholeLine(self):
        # The cc motion should expand the caret to cover the full line text.
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 2), (1, 2)))
        self.view.run_command('_vi_cc_motion', {'mode': _MODE_INTERNAL_NORMAL, 'count': 1})
        self.assertEqual(self.R((1, 0), (1, 7)), first_sel(self.view))

    def testDeletesWholeLine(self):
        # The cc action removes the selected line's text, leaving it empty.
        set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 7)))
        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})
        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\nfoo bar\n')

    def testKeepsLeadingWhitespace(self):
        # cc preserves the line's leading indentation (tab + spaces here).
        set_text(self.view, ''.join(('foo bar\n\t  foo bar\nfoo bar\n',)))
        add_selection(self.view, self.R((1, 0), (1, 10)))
        self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})
        self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\t  \nfoo bar\n')

    @unittest.skip("Implement")
    def testCanDeleteWithCount(self):
        self.assertTrue(False)

    @unittest.skip("Implement")
    def testDeletedLinesAreYanked(self):
        self.assertTrue(False)
| [
"guillermo.lopez@outlook.com"
] | guillermo.lopez@outlook.com |
33f784c238773201f20b7191cb092dc4657a942b | 90ac505fb14e4969cd4e7f164f8969ed2344d3e3 | /BYSL/ea.py | 9b6bb77bb599f20215c88cbaad443e730deffe10 | [] | no_license | rid47/python_basic_book | 4d08641ed802a80f5b5398c568231b366b1cf5d0 | f4a77577115b126094c9e5aac38a18bb42eeb28f | refs/heads/master | 2022-12-22T12:24:48.094483 | 2022-12-10T12:11:52 | 2022-12-10T12:11:52 | 234,990,760 | 0 | 1 | null | 2022-12-02T03:44:50 | 2020-01-20T00:45:53 | Tcl | UTF-8 | Python | false | false | 1,027 | py | class Car:
    def __init__(self):
        # Daily rental rate in dollars, keyed by car type.
        self.carFare = {'Hatchback': 30, 'Sedan': 50, 'SUV': 100}
    def displayFareDetails(self):
        # Print the per-day rate for every car type.
        print("Cost per day: ")
        print("Hatchback: $", self.carFare['Hatchback'])
        print("Sedan: $", self.carFare['Sedan'])
        print("SUV: $", self.carFare['SUV'])
def calculateFare(self, typeOfCar, numberOfDays):
return self.carFare[typeOfCar] * numberOfDays
car = Car()
# Interactive menu loop: runs until the user chooses option 3.
while True:
    print("Enter 1 to display fare details")
    print("Enter 2 to rent a car")
    print("Enter 3 to exit")
    userChoice = int(input())
    if userChoice == 1:
        car.displayFareDetails()
    # Changed from a second bare `if` to `elif` for consistency with the
    # other branches (choice 1 can never also be choice 2).
    elif userChoice == 2:
        print("Enter the type of car you would like to borrow")
        typeOfCar = input()
        # Fixed user-facing typo: "dasy" -> "days".
        print("Enter the number of days you would like to borrow the car")
        numberOfDays = int(input())
        if typeOfCar in car.carFare:
            fare = car.calculateFare(typeOfCar, numberOfDays)
            print("Total payable amount: $", fare)
        else:
            # Previously an unknown type crashed with a KeyError.
            print("Unknown car type:", typeOfCar)
    elif userChoice == 3:
        quit()
| [
"ridwanmizan@gmail.com"
] | ridwanmizan@gmail.com |
9f60fa0febd594f00a2a621ba5012a8222fc7696 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2244/60690/275105.py | 24682629ee0ab61e98762255770bf7e57e5231fe | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | n=int(input())
def isHW(str):
    """Return True if the given string is a palindrome.

    NOTE(review): the parameter shadows the builtin ``str``; the name is
    kept unchanged for compatibility with existing callers.
    """
    return str == str[::-1]
def isSS(num):
    """Return True if *num* is a prime number.

    Bug fix: the original's trial-division loop was empty for num < 4,
    so 0, 1 (and negatives) were wrongly reported as prime.  The loop is
    also trimmed from num/2 to sqrt(num) divisors.
    """
    if num < 2:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
# Advance n until it is both a palindrome and prime (next palindromic prime).
while isHW(str(n))==False or isSS(n)==False: n+=1
print(n) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
d8250aafde619dfeca2cbde7c29520501b8998c6 | 2e3e256bcc0086a61cbb0e082dc61290196e35d2 | /dragon/db/migration.py | f5dd52488479b98226bd4aca6080a64b3ffb9664 | [
"Apache-2.0"
] | permissive | miradam/openstack-workload-disaster-recovery | 79dcdb15ebf95d89157751c750a5dbab1557b942 | 854a3952bb9278cc08017ada97ff150b12b1c687 | refs/heads/master | 2020-03-24T15:36:46.808591 | 2016-12-15T12:32:17 | 2016-12-15T12:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database setup and migration commands."""
from dragon.db import utils
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='dragon.db.sqlalchemy.migration')
INIT_VERSION = 0
def db_sync(version=None):
    """Migrate the database to `version` or the most recent version.

    Delegates to the backend selected by the 'db_backend' option (see the
    IMPL LazyPluggable above).
    """
    return IMPL.db_sync(version=version)
def db_version():
    """Display the current database version.

    Delegates to the backend selected by the 'db_backend' option.
    """
    return IMPL.db_version()
| [
"OSHRITF@il.ibm.com"
] | OSHRITF@il.ibm.com |
ba83a9d4e1b219536ebc3008d7ca7a7053a3910f | e645ebf3b5177eb0ebedb7f239bd6e1b40bf1b07 | /ups/boost_python.cfg | b171d4be2d21271f368e3b34eb327ea98a5d77b8 | [] | no_license | lsst-dm/bp | e095cdb7412124fef39bdd8428fce70bbf0f462a | 31c0b65866d06a09575a53d0dd558320e6994a06 | refs/heads/main | 2023-07-22T11:32:48.479329 | 2023-07-10T00:30:32 | 2023-07-10T00:30:32 | 37,212,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,961 | cfg | # -*- python -*-
"""
Dependencies and configuration for Boost.Python
"""
import os.path
import eups
def _get_root():
    """Return the root directory of the package.

    Resolved through EUPS from the 'boost' product's install directory.
    """
    return eups.productDir("boost")
dependencies = {
# Names of packages required to build against this package.
"required": ["boost", "python"],
# Names of packages optionally setup when building against this package.
"optional": [],
# Names of packages required to build this package, but not required to build against it.
"buildRequired": [],
# Names of packages optionally setup when building this package, but not used in building against it.
"buildOptional": [],
}
def setup(conf, products, build=False):
    """
    Update an SCons environment to make use of the package.

    Arguments:
    conf ------ An SCons Configure context.  The SCons Environment conf.env is
                updated in place.
    products -- A dictionary consisting of all dependencies and the return values
                of calls to their setup() functions, or None if the dependency was
                optional and was not found.
    build ----- If True, this is the product currently being built, and products in
                "buildRequired" and "buildOptional" dependencies will also be
                present in the products dict.

    Returns a dict summarizing the configuration that was applied.
    """
    conf.env.PrependUnique(**paths)
    if not build:
        conf.env.AppendUnique(**doxygen)
    for target in libs:
        if target not in conf.env.libs:
            # BUG FIX: the original read ``lib[target].copy()`` where ``lib``
            # is undefined at this point (NameError at first use); copy this
            # package's library list instead.  list() is used rather than
            # .copy() for compatibility with the Python 2 era this file
            # targets (list.copy() is Python 3.3+).
            conf.env.libs[target] = list(libs[target])
        else:
            for lib in libs[target]:
                if lib not in conf.env.libs[target]:
                    conf.env.libs[target].append(lib)
    return {"paths": paths, "doxygen": doxygen, "libs": libs, "extra": {}}
###################################################################################################
# Variables for default implementation of setup() below; if the user provides
# a custom implementation of setup(), everything below is unnecessary.
# Packages to be added to the environment.
paths = {
# Sequence of paths to add to the include path.
"CPPPATH": [os.path.join(_get_root(), "include")],
# Sequence of paths to add to the linker path.
"LIBPATH": [os.path.join(_get_root(), "lib")],
}
doxygen = {
# Sequence of Doxygen tag files produced by this product.
"DOXYGEN_TAGFILES": [],
# Sequence of Doxygen configuration files to include in dependent products.
"DOXYGEN_INCLUDES": [],
}
# Libraries provided by the package, not including standard library prefixes or suffixes.
# Additional custom targets besides the standard "main", "python", and "test" targets may
# be provided as well.
libs = {
# Normal libraries.
"main": [],
# Libraries only linked with C++-coded Python modules.
"python": ["boost_python"],
# Libraries only linked with C++-coded unit tests.
"test": [],
}
| [
"jbosch@git.lsstcorp.org"
] | jbosch@git.lsstcorp.org |
ce3930eeb0c0f64b128f4ebacfebb3889752209a | cc5b1297398b644aa77b5f05988fda3402894fe7 | /python/seldon_core/__init__.py | f1f00629c973f60b4326a1e9a3a0741543ce1e9f | [
"Apache-2.0"
] | permissive | aarondav/seldon-core | fecf4580e96ac0c7f2a514933549b4e7c1b162e9 | 243ae299da8cebb07ba64071bf885ec0e5825a93 | refs/heads/master | 2022-01-16T07:53:00.055244 | 2019-06-21T06:55:11 | 2019-06-21T06:55:11 | 193,616,164 | 0 | 0 | Apache-2.0 | 2019-06-25T02:07:37 | 2019-06-25T02:07:36 | null | UTF-8 | Python | false | false | 96 | py | from seldon_core.version import __version__
#from seldon_core.seldon_client import SeldonClient
| [
"cc@seldon.io"
] | cc@seldon.io |
9846c30b04f991029724e3c7761741398fd0acde | 9b7ef36988860750e3a6b704254ed2aaeb3a3dc7 | /insta/forms.py | b11f0faf52a0e23179021c45bebbb59b22c7d8d5 | [] | no_license | nicky-code/instagram | ed016aef3cabed46cdff3f1c8598fb9445ea12e5 | 6d9eb31cca33ed137b730fb23cd15ea7a8482faa | refs/heads/master | 2021-09-09T14:38:38.152140 | 2019-10-22T14:21:35 | 2019-10-22T14:21:35 | 215,626,513 | 0 | 0 | null | 2021-09-08T01:22:42 | 2019-10-16T19:20:08 | Python | UTF-8 | Python | false | false | 917 | py | from django import forms
from .models import Image,Profile,Comments
class ImageForm(forms.ModelForm):
    """ModelForm for uploading an Image.

    The excluded fields (likes, user, profile, comments, image_name) are
    populated by the views rather than by the form user.
    """
    class Meta:
        model = Image
        exclude = ['image_name', 'likes','user','profile','comments']
        # widgets = {
        #     'tags': forms.CheckboxSelectMultiple(),
        # }
class ProfileForm(forms.ModelForm):
    """ModelForm for editing a user Profile; user linkage is set by the view."""
    class Meta:
        model = Profile
        exclude = ['user', 'profile','user_id']
        # widgets = {
        #     'tags': forms.CheckboxSelectMultiple(),
        # }
class CommentForm(forms.ModelForm):
    """ModelForm for posting a Comment; the author profile and target image
    are attached by the view."""
    class Meta:
        model = Comments
        exclude = ['user_profile','image_comment']
        # widgets = {
        #     'tags': forms.CheckboxSelectMultiple(),
        # }
class InstagramForm(forms.Form):
    """Plain (non-model) contact-style form with a name and an email."""
    your_name = forms.CharField(label='First Name',max_length=30)
    email = forms.EmailField(label='Email')
| [
"aline.nicole7@gmail.com"
] | aline.nicole7@gmail.com |
5a1874482abbdd857dd7f934e6aef889e1c11e38 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_commercialism.py | 9e09ea8f68ec64d83d9e926fdaf0cb983af5d158 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py |
#class header
class _COMMERCIALISM():
def __init__(self,):
self.name = "COMMERCIALISM"
self.definitions = [u'the principles and activity of commerce, especially those connected with profit rather than quality or doing good']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
973bf4a1bc852efaa74a10f6ea3c1548fc8bd3da | 55647a80c8b412af9df0ba3f50595cc2f29c25e6 | /res/scripts/common/Lib/plat-os2emx/IN.py | 4106b079428faa0be3a055320a5a82364e207dc1 | [] | no_license | cnsuhao/WOT-0.9.17-CT | 0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb | d1f932d8cabaf8aa21708622e87f83c8d24d6451 | refs/heads/master | 2021-06-08T18:11:07.039293 | 2016-11-19T19:12:37 | 2016-11-19T19:12:37 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,138 | py | # 2016.11.19 20:01:08 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/Lib/plat-os2emx/IN.py
PAGE_SIZE = 4096
HZ = 100
MAXNAMLEN = 260
MAXPATHLEN = 260
def htonl(X):
return _swapl(X)
def ntohl(X):
return _swapl(X)
def htons(X):
return _swaps(X)
def ntohs(X):
return _swaps(X)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_EON = 80
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPORT_RESERVED = 1024
IPPORT_USERRESERVED = 5000
def IN_CLASSA(i):
return long(i) & 2147483648L == 0
IN_CLASSA_NET = 4278190080L
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 16777215
IN_CLASSA_MAX = 128
def IN_CLASSB(i):
return long(i) & 3221225472L == 2147483648L
IN_CLASSB_NET = 4294901760L
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 65535
IN_CLASSB_MAX = 65536
def IN_CLASSC(i):
return long(i) & 3758096384L == 3221225472L
IN_CLASSC_NET = 4294967040L
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 255
def IN_CLASSD(i):
return long(i) & 4026531840L == 3758096384L
IN_CLASSD_NET = 4026531840L
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 268435455
def IN_MULTICAST(i):
return IN_CLASSD(i)
def IN_EXPERIMENTAL(i):
return long(i) & 3758096384L == 3758096384L
def IN_BADCLASS(i):
return long(i) & 4026531840L == 4026531840L
INADDR_ANY = 0
INADDR_LOOPBACK = 2130706433
INADDR_BROADCAST = 4294967295L
INADDR_NONE = 4294967295L
INADDR_UNSPEC_GROUP = 3758096384L
INADDR_ALLHOSTS_GROUP = 3758096385L
INADDR_MAX_LOCAL_GROUP = 3758096639L
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_MULTICAST_IF = 2
IP_MULTICAST_TTL = 3
IP_MULTICAST_LOOP = 4
IP_ADD_MEMBERSHIP = 5
IP_DROP_MEMBERSHIP = 6
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\Lib\plat-os2emx\IN.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 20:01:08 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
1f5eeb1362379c5c3b4038b981bfe90f79acab37 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/Ultra-Fast-Lane-Detection/scripts/convert_tusimple.py | 112ae4d113e7480f3b05e412d7499966a93165a3 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,498 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import tqdm
import numpy as np
import pdb
import json, argparse
def calc_k(line):
"""
Calculate the direction of lanes
"""
line_x = line[::2]
line_y = line[1::2]
length = np.sqrt((line_x[0] - line_x[-1]) ** 2 + (line_y[0] - line_y[-1]) ** 2)
if length < 90:
return -10 # if the lane is too short, it will be skipped
p = np.polyfit(line_x, line_y, deg=1)
rad = np.arctan(p[0])
return rad
def draw(im, line, idx, show=False):
'''
Generate the segmentation label according to json annotation
'''
line_x = line[::2]
line_y = line[1::2]
pt0 = (int(line_x[0]), int(line_y[0]))
if show:
cv2.putText(im, str(idx), (int(line_x[len(line_x) // 2]), int(line_y[len(line_x) // 2]) - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
idx = idx * 60
for i in range(len(line_x) - 1):
cv2.line(im, pt0, (int(line_x[i + 1]), int(line_y[i + 1])), (idx,), thickness=16)
pt0 = (int(line_x[i + 1]), int(line_y[i + 1]))
def get_tusimple_list(root, label_list):
'''
Get all the files' names from the json annotation
'''
label_json_all = []
for l in label_list:
l = os.path.join(root, l)
label_json = [json.loads(line) for line in open(l).readlines()]
label_json_all += label_json
names = [l['raw_file'] for l in label_json_all]
h_samples = [np.array(l['h_samples']) for l in label_json_all]
lanes = [np.array(l['lanes']) for l in label_json_all]
line_txt = []
for i in range(len(lanes)):
line_txt_i = []
for j in range(len(lanes[i])):
if np.all(lanes[i][j] == -2):
continue
valid = lanes[i][j] != -2
line_txt_tmp = [None] * (len(h_samples[i][valid]) + len(lanes[i][j][valid]))
line_txt_tmp[::2] = list(map(str, lanes[i][j][valid]))
line_txt_tmp[1::2] = list(map(str, h_samples[i][valid]))
line_txt_i.append(line_txt_tmp)
line_txt.append(line_txt_i)
return names, line_txt
def generate_segmentation_and_train_list(root, line_txt, names):
"""
The lane annotations of the Tusimple dataset is not strictly in order, so we need to find out the correct lane order for segmentation.
We use the same definition as CULane, in which the four lanes from left to right are represented as 1,2,3,4 in segentation label respectively.
"""
train_gt_fp = open(os.path.join(root, 'train_gt.txt'), 'w')
for i in tqdm.tqdm(range(len(line_txt))):
tmp_line = line_txt[i]
lines = []
for j in range(len(tmp_line)):
lines.append(list(map(float, tmp_line[j])))
ks = np.array([calc_k(line) for line in lines]) # get the direction of each lane
k_neg = ks[ks < 0].copy()
k_pos = ks[ks > 0].copy()
k_neg = k_neg[k_neg != -10] # -10 means the lane is too short and is discarded
k_pos = k_pos[k_pos != -10]
k_neg.sort()
k_pos.sort()
label_path = names[i][:-3] + 'png'
label = np.zeros((720, 1280), dtype=np.uint8)
bin_label = [0, 0, 0, 0]
if len(k_neg) == 1: # for only one lane in the left
which_lane = np.where(ks == k_neg[0])[0][0]
draw(label, lines[which_lane], 2)
bin_label[1] = 1
elif len(k_neg) == 2: # for two lanes in the left
which_lane = np.where(ks == k_neg[1])[0][0]
draw(label, lines[which_lane], 1)
which_lane = np.where(ks == k_neg[0])[0][0]
draw(label, lines[which_lane], 2)
bin_label[0] = 1
bin_label[1] = 1
elif len(k_neg) > 2: # for more than two lanes in the left,
which_lane = np.where(ks == k_neg[1])[0][0] # we only choose the two lanes that are closest to the center
draw(label, lines[which_lane], 1)
which_lane = np.where(ks == k_neg[0])[0][0]
draw(label, lines[which_lane], 2)
bin_label[0] = 1
bin_label[1] = 1
if len(k_pos) == 1: # For the lanes in the right, the same logical is adopted.
which_lane = np.where(ks == k_pos[0])[0][0]
draw(label, lines[which_lane], 3)
bin_label[2] = 1
elif len(k_pos) == 2:
which_lane = np.where(ks == k_pos[1])[0][0]
draw(label, lines[which_lane], 3)
which_lane = np.where(ks == k_pos[0])[0][0]
draw(label, lines[which_lane], 4)
bin_label[2] = 1
bin_label[3] = 1
elif len(k_pos) > 2:
which_lane = np.where(ks == k_pos[-1])[0][0]
draw(label, lines[which_lane], 3)
which_lane = np.where(ks == k_pos[-2])[0][0]
draw(label, lines[which_lane], 4)
bin_label[2] = 1
bin_label[3] = 1
cv2.imwrite(os.path.join(root, label_path), label)
train_gt_fp.write(names[i] + ' ' + label_path + ' ' + ' '.join(list(map(str, bin_label))) + '\n')
train_gt_fp.close()
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--root', required=True, help='The root of the Tusimple dataset')
return parser
if __name__ == "__main__":
args = get_args().parse_args()
# training set
names, line_txt = get_tusimple_list(args.root,
['label_data_0601.json', 'label_data_0531.json', 'label_data_0313.json'])
# generate segmentation and training list for training
generate_segmentation_and_train_list(args.root, line_txt, names)
# testing set
names, line_txt = get_tusimple_list(args.root, ['test_tasks_0627.json'])
# generate testing set for testing
with open(os.path.join(args.root, 'test.txt'), 'w') as fp:
for name in names:
fp.write(name + '\n')
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
8deed70e29027f1dfb4b831b5f3bedafcc194e64 | 5ee5853eb335fcf575d4344366ef9b4bce03570d | /pr1658m/min_operations.py | 2008296fd086a4c60d9774021e9bc8ae9d23e3b3 | [
"MIT"
] | permissive | l33tdaima/l33tdaima | 15463fb2f8d61286a4a3a7bacaaee2ab1f7c4f43 | f35305c618b383a79d05074d891cf0f7acabd88f | refs/heads/main | 2023-07-20T21:52:26.330301 | 2023-07-19T02:30:22 | 2023-07-19T02:30:22 | 99,509,451 | 1 | 0 | MIT | 2018-10-31T15:10:49 | 2017-08-06T19:44:29 | JavaScript | UTF-8 | Python | false | false | 772 | py | from typing import List
class Solution:
    def minOperations(self, nums: List[int], x: int) -> int:
        """Minimum number of elements removed from the ends of nums so the
        removed values sum to exactly x; -1 when impossible.

        Equivalent formulation: keep the longest middle subarray summing to
        sum(nums) - x and remove everything around it.
        """
        target = sum(nums) - x
        if target == 0:  # the whole array must be removed
            return len(nums)
        # smap: prefix sum -> earliest index where it occurred
        ans, s, smap = 0, 0, {0: -1}
        for i, n in enumerate(nums):
            s += n
            if s - target in smap:
                ans = max(ans, i - smap[s - target])
            # Keep only the FIRST index seen for each prefix sum, so the kept
            # subarray is as long as possible.  With the problem's all-positive
            # nums, prefix sums are unique and this is a no-op; it generalizes
            # the method to inputs containing zeros or negatives.
            smap.setdefault(s, i)
        return len(nums) - ans if ans else -1
# TESTS
# Each tuple is (nums, x, expected minimum number of end removals;
# -1 when x cannot be formed from end elements).
for nums, x, expected in [
    ([1, 1, 4, 2, 3], 5, 2),
    ([5, 6, 7, 8, 9], 4, -1),
    ([3, 2, 20, 1, 1, 3], 10, 5),
    ([4, 2, 1, 3], 10, 4),
]:
    sol = Solution()
    actual = sol.minOperations(nums, x)
    print("The minimum operations in", nums, "to reduce", x, "to zero ->", actual)
    assert actual == expected
| [
"l33tdaima@github.com"
] | l33tdaima@github.com |
b03961fffa86ad304863eef3bced898e77c688c3 | c55aedc3479a4d311fb406d8133b0e0ceb99d2df | /example/kdtree_0_base/kdtree_3_mesh_color.py | 7304ed067ce6dd6971b19b2aab7ca20736696a5c | [] | no_license | tarwcz111111111/DashCam_python | 4a33cdb3e5a8368b81ddc7c0596d4f0802b7c9d6 | 6e025ff49261c146205eb56bbbf4175f1d413f54 | refs/heads/master | 2020-08-25T04:55:16.695561 | 2017-08-28T04:34:59 | 2017-08-28T04:34:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,067 | py | #!/usr/bin/python3
# ==============================================================
# Pack the matrix process into google parse
# ==============================================================
import numpy as np
import triangle
from glumpy import glm
import sys
sys.path.append('/home/andy/Documents/gitHub/DashCam_python/module') # use the module under 'module'
import file_process
import google_parse
import glumpy_setting
import base_process
# Create dashCamFileProcess and load 50 top Dashcam
dashCamFileProcess = file_process.DashCamFileProcessor()
# Manual anchor, but I think this is so wrong.
#anchor = {'panoId': 'uSjqj9Lt256V8I7RckMykA', 'Lat': 25.068939, 'Lon': 121.479781}
anchor = {'panoId': 'JfAAg1RD0myOqNIU0utdNA', 'Lat': 22.622543, 'Lon': 120.285735}
"""
For Visual
"""
# Select one dashcam recording (index 6 only) and derive its point-cloud id.
sleIndex = 6
for fileIndex in range(sleIndex,sleIndex+1):
    fileID = str(dashCamFileProcess.list50[fileIndex][1])
    print(fileID, fileIndex)
    fileID += '_info3d'
"""
Create the global metric point cloud,
then set the region anchor
"""
sv3DRegion = google_parse.StreetView3DRegion(fileID)
sv3DRegion.init_region(anchor=None)
anchor_matrix_whole = sv3DRegion.anchorMatrix
index = 0
# Accumulate per-panorama clouds (full / ground / ground-grid).  Note the
# unconditional `break` below: only the first panorama is actually used.
for sv3D_id, sv3D in sorted(sv3DRegion.sv3D_Dict.items()):
    ### WHY???
    sv3D.create_ptcloud_ground_grid()
    sv3D.apply_global_adjustment()
    sv3D.apply_local_adjustment()
    if index == 0:
        data = sv3D.ptCLoudData
        data_gnd = sv3D.ptCLoudDataGnd
        data_gnd_grid = sv3D.ptCLoudDataGndGrid
    else:
        data = np.concatenate((data, sv3D.ptCLoudData), axis=0)
        data_gnd = np.concatenate((data_gnd, sv3D.ptCLoudDataGnd), axis=0)
        data_gnd_grid = np.concatenate((data_gnd_grid, sv3D.ptCLoudDataGndGrid), axis=0)
    index += 1
    if index > 0:
        break
    #break
gpyWindow = glumpy_setting.GpyWindow()
#programSV3DRegion = glumpy_setting.ProgramSV3DRegion(data=data, name=None, point_size=1, anchor_matrix=anchor_matrix_whole)
#programSV3DRegion.apply_anchor()
#gpyWindow.add_program(programSV3DRegion)
# Render only the ground subset of the cloud.
programSV3DRegion = glumpy_setting.ProgramSV3DRegion(data=data_gnd, name=None, point_size=1, anchor_matrix=anchor_matrix_whole)
programSV3DRegion.apply_anchor()
gpyWindow.add_program(programSV3DRegion)
#programSV3DRegion = glumpy_setting.ProgramSV3DRegion(data=data_gnd_grid, name=None, point_size=1, anchor_matrix=anchor_matrix_whole, alpha=0)
#programSV3DRegion.apply_anchor()
#gpyWindow.add_program(programSV3DRegion)
"""
Triangle
"""
# Delaunay-triangulate the ground points in the x-y plane to get a mesh.
tri = np.array(triangle.delaunay(data_gnd['a_position'][:, 0:2]), dtype=np.uint32)
#data_gnd_grid['a_position'] = base_process.sv3d_apply_m4(data=data_gnd_grid['a_position'], m4=np.linalg.inv(anchor_matrix_whole))
#data_gnd['a_position'][:, 2] = 0
programGround = glumpy_setting.ProgramPlane(data=data_gnd, name=str(index), face=tri)
gpyWindow.add_program(programGround)
#programAxis = glumpy_setting.ProgramAxis(line_length=5)
#gpyWindow.add_program(programAxis)
gpyWindow.run()
| [
"ydnaandy123@gmail.com"
] | ydnaandy123@gmail.com |
80149282aabb59543236536d133ab52397c545e0 | 672809bd026d006e785f87c72995a2f368702d63 | /site_main/matcher/matcher.py | 439dbfb48a1e1430f417dd3e2fda9da149d0dabc | [] | no_license | kz26/uchicagolunch | 34c391688897dc88edc78ccc771805c2f76d64d5 | f1b0415856e7a62a8ca12ea824af3483a80c876d | refs/heads/master | 2016-09-06T17:35:21.703308 | 2012-03-23T23:00:36 | 2012-03-23T23:00:36 | 2,806,713 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | py | from site_main.models import *
from site_main.emails import *
from datetime import datetime, timedelta, date, time
import edmonds
from random import choice
# find and process matches
# returns a list of newly created Match objects
def run():
    """Pair up compatible open lunch Requests and create Match records.

    Returns the list of newly created Match objects.  Each matched Request
    is flagged matched=True and notify_match() is called on the new Match.
    """
    # retrieve a list of unfilled requests
    def getRequests():
        # request pk -> (set of available dates, set of preferred restaurant-category pks)
        reqs = Request.objects.filter(expires__gt=datetime.now(), matched=False, active=True)
        rl = {}
        for r in reqs:
            r_id = r.pk
            r_dates = set(r.day_set.all().values_list('date', flat=True))
            r_prefs = set(r.restaurant_prefs.all().values_list('pk', flat=True))
            rl[r_id] = (r_dates, r_prefs)
        return rl
    # create a graph from the above
    # dictionary key: request ID
    # dictionary value: compatible request IDs
    def createGraph(rl):
        # Two requests are compatible when they share at least one date AND
        # one restaurant preference.  O(n^2) pairwise scan; `iteritems` makes
        # this Python 2 only.
        g = {}
        for k1, v1 in rl.iteritems():
            if k1 not in g:
                g[k1] = []
            for k2, v2 in rl.iteritems():
                if k2 == k1: continue
                if not v2[0].isdisjoint(v1[0]) and not v2[1].isdisjoint(v1[1]):
                    g[k1].append(k2)
        return g
    # runs Edmond's matching algorithm on the input graph
    def findMatches(g):
        return edmonds.matching(g)
    reqs = getRequests()
    g = createGraph(reqs)
    matches = findMatches(g)
    matched = []
    results = []
    for k, v in matches.iteritems():
        # The matching maps both directions (k->v and v->k); skip repeats.
        if k in matched or v in matched: continue
        req1 = reqs[k]
        req2 = reqs[v]
        # Suggest: a shared day, a time between 12:00 and 14:00 in 15-minute
        # steps, and a restaurant from a mutually preferred category.
        suggested_day = choice(tuple(req1[0].intersection(req2[0])))
        suggested_date = datetime.combine(suggested_day, time(12)) + timedelta(minutes=choice(range(0, 135, 15)))
        suggested_rc = choice(tuple(req1[1].intersection(req2[1])))
        suggested_rest = choice(list(Restaurant.objects.filter(category__pk=suggested_rc)))
        reqo1 = Request.objects.get(pk=k)
        reqo2 = Request.objects.get(pk=v)
        mo = Match.objects.create(request1=reqo1, request2=reqo2, location=suggested_rest, date=suggested_date)
        notify_match(mo)
        for r in (reqo1, reqo2):
            r.matched = True
            r.save()
        matched.extend((k, v))
        results.append(mo)
    return results
| [
"whitehat2k9@gmail.com"
] | whitehat2k9@gmail.com |
0e036c343fc1a1037156ec9e3dc7c44563c81dbf | b23bb2c9c98909c53e779e762c359fdb7b0cf412 | /tests/unit/raml/tests/test_traits.py | b6eeb58116958ff7ca17198f63b8cd6eedbd49c1 | [
"MIT"
] | permissive | mpetyx/pyapi | 4902e97340e2597fcfe52968dc6902a96d9a3448 | 1c8c5b392e8a943ebff0864b129defdbf21570f2 | refs/heads/master | 2021-01-06T20:37:33.974145 | 2015-05-26T10:06:41 | 2015-05-26T10:06:42 | 29,341,456 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,330 | py | __author__ = 'ad'
import os.path
from pyapi.libraries.pyraml_parser_master import pyraml
from pyapi.libraries.pyraml_parser_master.pyraml import parser
from pyapi.libraries.pyraml_parser_master.pyraml.entities import RamlRoot, RamlTrait, RamlBody, RamlResourceType
fixtures_dir = os.path.join(os.path.dirname(__file__), '../', 'samples')
def test_parse_traits_with_schema():
    """An inline JSON schema inside a trait body must survive parsing verbatim."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/media-type.yaml'))
    assert isinstance(p, RamlRoot), RamlRoot
    assert p.traits, "Property `traits` should be set"
    assert len(p.traits) == 1, p.traits
    assert isinstance(p.traits["traitOne"], RamlTrait), p.traits
    assert isinstance(p.traits["traitOne"].body, RamlBody), p.traits["traitOne"]
    assert p.traits["traitOne"].body.schema == """{ "$schema": "http://json-schema.org/draft-03/schema",
  "type": "object",
  "description": "A product presentation",
  "properties": {
    "id": { "type": "string" },
    "title": { "type": "string" }
  }
}
""", p.traits["traitOne"].body.schema
def test_parse_raml_with_many_traits():
    """Both traits in full-config.yaml parse; <<value>> placeholders in
    displayName are preserved literally."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/full-config.yaml'))
    assert isinstance(p, RamlRoot), RamlRoot
    assert p.traits, "Property `traits` should be set"
    assert len(p.traits) == 2, p.traits
    assert isinstance(p.traits["simple"], RamlTrait), p.traits
    assert isinstance(p.traits["knotty"], RamlTrait), p.traits
    assert p.traits["simple"].displayName == "simple trait"
    assert p.traits["knotty"].displayName == "<<value>> trait"
def test_parse_resource_type_with_references_to_traits():
    """Resource types in media-type.yaml parse with their methods attached."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/media-type.yaml'))
    assert isinstance(p, RamlRoot), RamlRoot
    # NOTE(review): the assertion message says `traits` but the check is on
    # resourceTypes -- looks like a copy-paste slip in the message text.
    assert p.resourceTypes, "Property `traits` should be set"
    assert len(p.resourceTypes)
    assert 'typeParent' in p.resourceTypes, p.resourceTypes
    assert isinstance(p.resourceTypes['typeParent'], RamlResourceType), p.resourceTypes
    parent_resource_type = p.resourceTypes['typeParent']
    assert parent_resource_type.methods, p.resourceTypes['typeParent']
    assert 'get' in parent_resource_type.methods
    assert 'typeChild' in p.resourceTypes, p.resourceTypes
assert isinstance(p.resourceTypes['typeChild'], RamlResourceType), p.resourceTypes | [
"mpetyx@gmail.com"
] | mpetyx@gmail.com |
7ee2fd4d8932d9ae1e9f9b0a3189f6b31cfc3a56 | e87532daceef2e6d0db72238d647c5bde0993198 | /apps/market/urls.py | ab5af579c5daee08bd75cf7d23a5bc60b9ba297d | [] | no_license | brijmohan/zamboni | cddfd07078c3eae902785d007c1f1e94b581c269 | 57eca56bfeae4f28547856d64284d10970905809 | refs/heads/master | 2020-12-25T03:11:58.888828 | 2011-10-24T09:25:06 | 2011-10-25T18:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | from django.conf.urls.defaults import patterns, url
from addons.urls import ADDON_ID
from market import views
# URL route: verify a market receipt for the add-on matched by ADDON_ID.
urlpatterns = patterns('',
    url(r'^verify/%s$' % ADDON_ID, views.verify_receipt,
        name='api.market.verify'),
)
| [
"amckay@mozilla.com"
] | amckay@mozilla.com |
7def4e2f75e56070b0616fabe06c78a5747b2448 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /yukicoder/yuki051.py | 75ec2cd5a033f3facff8694ae9a30e1056ed7097 | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | W = int(input())
D = int(input())
# Dead locals removed: `task = []` and `sumTask = 0` were assigned but never
# read anywhere in the script.
# Countdown over the remaining days: on each day `rest` (from D down to 2)
# the remaining workload W shrinks by W // rest**2.
for rest in range(D, 1, -1):
    W -= W//rest**2
print(W)
| [
"premier3next@yahoo.co.jp"
] | premier3next@yahoo.co.jp |
8230cf200ed8c3a204e1cdb5def5c66e6fbfd784 | f7aa97fe19b431523f35dc5badc9e8ff919ffa00 | /fss17/project/tools/axe/libWhere.py | 2731db6b521497034bcb603c18bfc22c6d81453a | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | rahlk/fss17 | 3b331427d450c5bb46b71b4aa5c77c59a8ec0a70 | 49e22c4ad01ff751f24c3e5702b7fa36a3a18e96 | refs/heads/master | 2021-01-19T18:03:13.364689 | 2017-12-12T12:51:28 | 2017-12-12T12:51:28 | 101,105,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,919 | py | """
# Support Code
## Standard Headers
"""
from __future__ import division, print_function
import random
import sys
sys.dont_write_bytecode = True
from settingsWhere import *
"""
## Simple, low-level stuff
### Maths Stuff
"""
def gt(x, y):
    "True when x exceeds y."
    return x > y
def lt(x, y):
    "True when x is below y."
    return x < y
def medianIQR(lst, ordered=False):
    """Return (median, inter-quartile range) of the numbers in lst.

    lst is sorted first unless the caller promises it is already ordered.
    The IQR is approximated as lst[3*(n//4)] - lst[n//4].

    Bug fix: the median used to be read at index 2*(n//4) (odd n) or around
    index n//4 (even n), which is not the middle of the list whenever
    n % 4 >= 2 -- e.g. n == 7 returned lst[2] instead of lst[3].  It is now
    taken at n // 2, the true middle.
    """
    if not ordered:
        lst = sorted(lst)
    n = len(lst)
    q = n // 4
    iqr = lst[q * 3] - lst[q]
    mid = n // 2
    if n % 2:  # odd length: single middle element
        return lst[mid], iqr
    else:  # even length: mean of the two middle elements
        return (lst[mid - 1] + lst[mid]) * 0.5, iqr
def median(lst, ordered=False):
    "Convenience wrapper: just the median half of medianIQR."
    med, _ = medianIQR(lst, ordered)
    return med
"""
An accumulator for reporting on numbers.
"""
class N():
    "Add/delete counts of numbers."
    # Online accumulator: n (count), mu (mean) and m2 (sum of squared
    # deviations) are maintained with Welford-style incremental updates,
    # plus a Cache of raw sample values.
    def __init__(i, inits=[]):
        # NOTE: map() is used for its side effect -- Python 2 semantics
        # (this module also relies on basestring in todo(), i.e. py2 only).
        i.zero()
        map(i.__iadd__, inits)
    def zero(i):
        # Reset all statistics and drop the sample cache.
        i.n = i.mu = i.m2 = 0
        i.cache = Cache()
    def sd(i):
        # Sample standard deviation; 0 when fewer than two numbers seen.
        if i.n < 2:
            return 0
        else:
            return (max(0, i.m2) / (i.n - 1)) ** 0.5
    def __iadd__(i, x):
        # Welford update: fold x into count, mean and m2.
        i.cache += x
        i.n += 1
        delta = x - i.mu
        i.mu += delta / (1.0 * i.n)
        i.m2 += delta * (x - i.mu)
        return i
    def __isub__(i, x):
        # Reverse Welford update.  The cache is reset (not shrunk) since a
        # random sample cannot selectively forget one value.
        i.cache = Cache()
        if i.n < 2: return i.zero()
        i.n -= 1
        delta = x - i.mu
        i.mu -= delta / (1.0 * i.n)
        i.m2 -= delta * (x - i.mu)
        return i
class Cache:
    "Keep a random sample of stuff seen so far (reservoir-style sampling)."
    def __init__(i, inits=[]):
        # i.all = sampled values, i.n = total values offered,
        # i._has = memoized summary stats (invalidated on change).
        # NOTE: map() for side effect -- Python 2 semantics (py2-only module).
        i.all, i.n, i._has = [], 0, None
        map(i.__iadd__, inits)
    def __iadd__(i, x):
        i.n += 1
        # NOTE(review): the literal 128 here and The.cache.size below are
        # presumably the same number -- confirm in settingsWhere.
        if len(i.all) < 128:  # if not full
            i._has = None
            i.all += [x]  # then add
        else:  # otherwise, maybe replace an old item
            if random.random() <= The.cache.size / i.n:
                i._has = None
                i.all[int(random.random() * The.cache.size)] = x
        return i
    def has(i):
        "Summary stats (median, iqr, lo, hi), cached until the sample changes."
        if i._has == None:
            lst = sorted(i.all)
            # Bug fix: summarize the *sorted* copy.  The old code built lst
            # but then passed the unsorted i.all with ordered=True and read
            # lo/hi from its ends, giving wrong median/lo/hi whenever the
            # sample was not already sorted.
            med, iqr = medianIQR(lst, ordered=True)
            i._has = o(
                median=med, iqr=iqr,
                lo=lst[0], hi=lst[-1])
        return i._has
"""
### Random stuff.
"""
# Shorthands over the random module.
by = lambda x: random.uniform(0, x)  # a uniform float drawn from [0, x]
rseed = random.seed
any = random.choice  # NOTE: deliberately shadows the builtin any() in this module
rand = random.random
def seed(r=None):
    # Seed the RNG, lazily building the global settings object The first;
    # falls back to The.seed when no explicit seed is supplied.
    global The
    if The is None: The = defaults()
    if r is None: r = The.seed
    rseed(r)
"""
### List Handling Tricks
"""
def first(lst):
    "Element 0 of lst."
    return lst[0]
def second(lst):
    "Element 1 of lst."
    return lst[1]
def third(lst):
    "Element 2 of lst."
    return lst[2]
"""
### Printing Stuff
Print without newline:
"""
def say(*lst): print(*lst, end="")  # like print() but without the trailing newline
"""
Print a list of numbers without an excess of decimal places:
"""
def gs(lst):
    "Apply g() to every number in lst."
    return [g(x) for x in lst]
def g(x):
    "Shorten x via '%g' (6 significant digits); whole numbers come back as ints."
    txt = '%g' % x
    if int(x) == x:
        return int(txt)
    return float(txt)
"""
Pretty print a dictionary:
"""
def showd(d):
    """Render dict d as ':key value' pairs, sorted by key.

    Keys containing '_' are skipped; list values are shortened via gs()
    and floats are formatted with '%g'.
    """
    def fmt(key, val):
        if isinstance(val, list):
            val = gs(val)
        if isinstance(val, float):
            return ":%s %g" % (key, val)
        return ":%s %s" % (key, val)
    parts = [fmt(key, val) for key, val in sorted(d.items()) if "_" not in key]
    return ' '.join(parts)
"""
## Decorator to run code at Start-up
"""
def go(f):
    "A decorator that runs code at load time."
    # Prints a banner (plus f's docstring, if any) and calls f immediately.
    # Nothing is returned, so the decorated name ends up bound to None.
    print("\n# ---|", f.__name__, "|-----------------")
    if f.__doc__: print("#", f.__doc__)
    f()
"""
## Handling command line options.
Convert command line to a function call.
e.g. if the file lib.py ends with
if __name__ == '__main__':eval(todo())
then
python lib.py myfun :a 1 :b fred
results in a call to _myfun(a=1,b='fred')_.
"""
def todo(com="print(The._logo,'WHERE (2.0) you at?')"):
    # Translate "prog.py fun :a 1 :b fred" into the call string
    # "fun(**dict(a=1,b='fred'))"; returns the default `com` when no
    # command-line command is given.
    import sys
    if len(sys.argv) < 2: return com
    def strp(x):
        # Python 2 only: basestring does not exist in Python 3.
        return isinstance(x, basestring)
    def wrap(x):
        # Quote strings so they survive the later eval of the call string.
        return "'%s'" % x if strp(x) else str(x)
    def oneTwo(lst):
        # Consume lst destructively in (flag, value) pairs.
        while lst: yield lst.pop(0), lst.pop(0)
    def value(x):
        # SECURITY: eval of raw argv text -- acceptable for a trusted local
        # CLI, unsafe for any untrusted input.
        try:
            return eval(x)
        except:
            return x
    def two(x, y):
        # ":a 1" -> "a=1" (strip the leading ':' from the flag name).
        return x[1:] + "=" + wrap(value(y))
    twos = [two(x, y) for x, y in oneTwo(sys.argv[2:])]
    return sys.argv[1] + '(**dict(' + ','.join(twos) + '))'
"""
## More interesting, low-level stuff
"""
def timing(f, repeats=10):
    "How long does 'f' take to run?"
    # Mean time per call over `repeats` calls of the zero-argument f.
    # NOTE: time.clock() was removed in Python 3.8; this module targets
    # Python 2 (see todo()'s use of basestring).
    import time
    time1 = time.clock()
    for _ in range(repeats):
        f()
    return (time.clock() - time1) * 1.0 / repeats
"""
## data.dat Completion Tool
Fills in some details on a table of data.dat. For example,
def nasa93():
vl=1;l=2;n=3;h=4;vh=5;xh=6
return data.dat(indep= [
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data.dat', 'cplx', 'ruse',
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
'ltex', 'tool', 'site', 'sced', 'kloc'],
less = ['effort', 'defects', 'months'],
_rows=[
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
...
Adds in information on _cols_, _decisions_, _hi,lo_, etc:
{ :cols [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 22, 23, 24]
:decisions [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22]
:eval <function <lambda> at 0x7f3f825bea28>
:hi {0: 4, 1: 4, 2: 4, 3: 5, 4: 4, 5: 5, 6: 5,
7: 6, 8: 3, 9: 3, 10: 6, 11: 6, 12: 4, 13: 5,
14: 5, 15: 3, 16: 5, 17: 4, 18: 4, 19: 4,
20: 3, 21: 3, 22: 980, 23: 8211, 24: 50961}
:lo {0: 4, 1: 4, 2: 4, 3: 5, 4: 2, 5: 2, 6: 2,
7: 2, 8: 3, 9: 3, 10: 3, 11: 3, 12: 2,
13: 3, 14: 3, 15: 3, 16: 2, 17: 1, 18: 1,
19: 3, 20: 3, 21: 2, 22: 0.9, 23: 8.4, 24: 28}
:names ['Prec', 'Flex', 'Resl', 'Team', 'Pmat',
'rely', 'data.dat', 'cplx', 'ruse', 'docu',
'time', 'stor', 'pvol', 'acap', 'pcap',
'pcon', 'aexp', 'plex', 'ltex', 'tool',
'site', 'sced', 'kloc', 'effort',
'defects', 'months']
:objectives [22, 23, 24]
:w {0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1,
7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1,
14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 1,
20: 1, 21: 1, 22: -1, 23: -1, 24: -1}
}
Code:
"""
def data(indep=[], less=[], more=[], _rows=[]):
    # Build a model record `m` (an `o` bag) from column names and raw rows:
    # indep = decision columns; less / more = objectives to minimize /
    # maximize.  Fills in column indexes, per-column weights (+1 for
    # decisions and `more`, -1 for `less`) and the observed lo/hi of each
    # column across _rows.
    nindep = len(indep)
    ndep = len(less) + len(more)
    m = o(lo={}, hi={}, w={},
          eval=lambda m, it: True,
          _rows=[o(cells=r, score=0, scored=False,
                   x0=None, y0=None)
                 for r in _rows],
          names=indep + less + more)
    m.decisions = [x for x in range(nindep)]
    # NOTE(review): objectives start at nindep - 1, overlapping the last
    # decision index.  This matches the worked example in the module prose
    # above (objectives [22, 23, 24] with 23 indep columns), but looks like
    # an off-by-one -- confirm before relying on it.
    m.objectives = [nindep + x - 1 for x in range(ndep)]
    m.cols = m.decisions + m.objectives
    for x in m.decisions:
        m.w[x] = 1
    # NOTE(review): the next two loops reuse the final x (and then y) left
    # over from the loops above, so weights land at indexes x+y and x+y+z;
    # they would raise NameError if decisions (or less) were empty.
    for y, _ in enumerate(less):
        m.w[x + y] = -1
    for z, _ in enumerate(more):
        m.w[x + y + z] = 1
    for x in m.cols:
        all = sorted(row.cells[x] for row in m._rows)  # `all` shadows the builtin
        m.lo[x] = all[0]
        m.hi[x] = all[-1]
    return m
"""
## Start-up Actions
"""
if __name__ == '__main__': eval(todo())
| [
"i.m.ralk@gmail.com"
] | i.m.ralk@gmail.com |
9d92f42947a9a168d3bebbdd5e5d06464b004d38 | b87f66b13293782321e20c39aebc05defd8d4b48 | /maps/build/mayavi/enthought/tvtk/tests/test_class_tree.py | 963470a3f830e41f91e935de287c3b563a80c44f | [] | no_license | m-elhussieny/code | 5eae020932d935e4d724c2f3d16126a0d42ebf04 | 5466f5858dbd2f1f082fa0d7417b57c8fb068fad | refs/heads/master | 2021-06-13T18:47:08.700053 | 2016-11-01T05:51:06 | 2016-11-01T05:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,944 | py | # Author: Prabhu Ramachandran
# License: BSD style
# Copyright (c) 2004, Enthought, Inc.
"""Tests class_tree.py. Uses the vtk module to test the code. Also
tests if the tree generation works for the __builtin__ module.
"""
import unittest
from enthought.tvtk import class_tree
import vtk
import __builtin__
# This computation can be expensive, so we cache it.
_cache = class_tree.ClassTree(vtk)
_cache.create()
def get_level(klass):
    """Depth of klass in the inheritance tree; classes with no bases are level 0."""
    bases = klass.__bases__
    if not bases:
        return 0
    return 1 + max(get_level(base) for base in bases)
class TestClassTree(unittest.TestCase):
    """Exercises class_tree.ClassTree against the vtk module (built once at
    import time as _cache, since it is expensive) and against __builtin__."""
    def setUp(self):
        # Fresh reference to the shared, pre-built vtk tree for each test.
        self.t = _cache
    def test_basic_vtk(self):
        """Basic tests for the VTK module."""
        t = self.t
        self.assertEqual(t.get_node('vtkObject').name, 'vtkObject')
        self.assertEqual(t.get_node('vtkObject').parents[0].name,
                         'vtkObjectBase')
        # When vtkArrayCoordinates exists as a new-style class, the tree has
        # two roots ('object' and 'vtkObjectBase'); otherwise a single root.
        if (hasattr(vtk, 'vtkArrayCoordinates')
            and issubclass(vtk.vtkArrayCoordinates, object)):
            self.assertEqual(len(t.tree[0]), 2)
            names = [x.name for x in t.tree[0]]
            names.sort()
            self.assertEqual(names, ['object', 'vtkObjectBase'])
        else:
            self.assertEqual(len(t.tree[0]), 1)
            self.assertEqual(t.tree[0][0].name, 'vtkObjectBase')
    def test_ancestors(self):
        """Check if get_ancestors is OK."""
        # The parent child information is already tested so this test
        # needs to ensure that the method works for a few known
        # examples.
        # Simple VTK test.
        t = self.t
        n = t.get_node('vtkDataArray')
        x = vtk.vtkDataArray
        ancestors = []
        while x.__name__ != 'vtkObjectBase':
            x = x.__bases__[0]
            ancestors.append(x.__name__)
        self.assertEqual([x.name for x in n.get_ancestors()], ancestors)
        # Simple __builtin__ test.
        t = class_tree.ClassTree(__builtin__)
        t.create()
        n = t.get_node('TabError')
        bases = ['IndentationError', 'SyntaxError',
                 'StandardError', 'Exception']
        if len(Exception.__bases__) > 0:
            bases.extend(['BaseException', 'object'])
        self.assertEqual([x.name for x in n.get_ancestors()],
                         bases)
    def test_parent_child(self):
        """Check if the node's parent and children are correct."""
        # Every node's parents must mirror its class's __bases__, and every
        # child must list this node's class among its own bases.
        t = self.t
        for node in t:
            n_class = t.get_class(node.name)
            base_names = [x.__name__ for x in n_class.__bases__]
            base_names.sort()
            parent_names = [x.name for x in node.parents]
            parent_names.sort()
            self.assertEqual(base_names, parent_names)
            for c in node.children:
                c_class = t.get_class(c.name)
                base_names = [x.__name__ for x in c_class.__bases__]
                self.assertEqual(node.name in base_names, True)
    def test_level(self):
        """Check the node levels."""
        t = self.t
        for node in t:
            self.assertEqual(get_level(t.get_class(node.name)), node.level)
    def test_tree(self):
        """Check the tree structure."""
        # All nodes are accounted for across levels, and every node sits at
        # the level matching its position in the tree.
        t = self.t
        n = sum([len(x) for x in t.tree])
        self.assertEqual(n, len(t.nodes))
        for level, nodes in enumerate(t.tree):
            for n in nodes:
                self.assertEqual(n.level, level)
    def test_builtin(self):
        """Check if tree structure for __builtin__ works."""
        # This tests to see if the tree structure generation works for
        # the __builtin__ module.
        t = class_tree.ClassTree(__builtin__)
        t.create()
        self.t = t
        self.test_parent_child()
        self.test_level()
        self.test_tree()
if __name__ == "__main__":
unittest.main()
| [
"fspaolo@gmail.com"
] | fspaolo@gmail.com |
87bfa0465302e0f16e8e9caa8994ab7d156cc520 | 1eb960cec1c1bc891ea7cb9874b11182d753fabb | /news/migrations/0004_message_to_user.py | 197b9b145a6e878b07a38558f57a7fb1d0ef7532 | [] | no_license | squallcs12/kidnews-fbhack | 516f87160042389b9a9be1016d6a71dc95f97d13 | 154368a40b2042671b933a9ac53ca2e469266c84 | refs/heads/master | 2021-01-19T04:24:35.254491 | 2016-07-31T03:27:42 | 2016-07-31T03:27:42 | 64,523,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-30 11:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds Message.to_user, a FK to the swappable user model.
    # preserve_default=False with default=1 means the pk-1 user is used only
    # to backfill existing rows during this migration; the model field itself
    # keeps no default afterwards.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('news', '0003_emotion_useremotion'),
    ]
    operations = [
        migrations.AddField(
            model_name='message',
            name='to_user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='to_users', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| [
"daotranbang@gmail.com"
] | daotranbang@gmail.com |
49520f217fc253f8dd6e40a4f3b78353bec18c90 | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/_codeforces/1303_c.py | 4d8b66d7ea2f0f8e979a1a705aaa21fe7ae12ec1 | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,826 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
    """Solve Codeforces 1303C "Perfect Keyboard" for every query on stdin.

    For each password string, try to build a one-row keyboard layout (a
    string over 'a'..'z') in which every pair of characters adjacent in the
    password is also adjacent on the keyboard.  Prints "YES" plus a layout,
    or "NO" when the adjacency constraints are contradictory.
    """
    q = int(input())
    for _ in range(q):
        s = input()
        d = dict()  # adjacency constraints: char -> list of (at most two) neighbours
        f = True    # feasibility flag; cleared as soon as a conflict appears
        # STEP 1: collect neighbour constraints from adjacent password characters.
        for i in range(len(s)):
            # Characters right after / before position i (None at the string edges).
            nkey = s[i + 1] if i != len(s) - 1 else None
            pkey = s[i - 1] if i != 0 else None
            l = d.get(s[i], [])
            # Register the previous character as a neighbour if not yet known.
            if pkey is not None and pkey not in l:
                # A key has only two sides; a third distinct neighbour is impossible.
                if len(l) >= 2:
                    f = False
                    break
                l.append(pkey)
                d[s[i]] = l
                l2 = d.get(pkey, [])
                if s[i] not in l2:
                    if len(l2) >= 2:
                        f = False
                        break
                    l2.append(s[i])
                    d[pkey] = l2
            # Same bookkeeping for the next character.
            if nkey is not None and nkey not in l:
                if len(l) >= 2:
                    f = False
                    break
                l.append(nkey)
                d[s[i]] = l
                l2 = d.get(nkey, [])
                if s[i] not in l2:
                    if len(l2) >= 2:
                        f = False
                        break
                    l2.append(s[i])
                    d[nkey] = l2
        # After STEP 1 an input such as "codedoca" yields the constraint map
        # {'c': ['o', 'a'], 'o': ['c', 'd'], 'd': ['o', 'e'], 'e': ['d'], 'a': ['c']}
        # STEP 2: lay the constrained keys out on a single row.
        s = ""  # the keyboard row built so far (the password is no longer needed)
        for k in d.keys():
            if s == "":  # first key seeds the row together with its neighbours
                if len(d[k]) == 0:
                    s += k
                elif len(d[k]) == 1:
                    s += k + d[k][0]
                elif len(d[k]) == 2:
                    s += d[k][0] + k + d[k][1]
            elif s.find(k) == -1:  # k is not on the row yet
                ic1 = ic2 = -1
                # Locate whichever required neighbours are already placed.
                if len(d[k]) == 1:
                    ic1 = s.find(d[k][0])
                elif len(d[k]) == 2:
                    ic1 = s.find(d[k][0])
                    ic2 = s.find(d[k][1])
                if ic1 != -1 and ic2 != -1:
                    # Both neighbours already placed (hence separated by other
                    # keys); k cannot sit next to both of them.
                    f = False
                elif ic1 != -1:
                    if ic1 == 0:  # neighbour at the left end: attach k in front
                        s = k + s
                        if len(d[k]) == 2:  # also place the second, unplaced neighbour
                            s = d[k][1] + s
                    elif ic1 == len(s) - 1:  # mirror case at the right end
                        s = s + k
                        if len(d[k]) == 2:
                            s = s + d[k][1]
                    # NOTE(review): when the placed neighbour sits in the middle
                    # of the row, k is silently not inserted here; behaviour kept
                    # from the original -- confirm against the problem statement.
                elif ic2 != -1:
                    if ic2 == 0:
                        # BUGFIX: the original read `s[k][0]` (indexing a str with
                        # a str key -> TypeError at runtime); d[k][0] is intended.
                        s = d[k][0] + k + s
                    elif ic2 == len(s) - 1:
                        s = s + k + d[k][0]  # BUGFIX: likewise was `s[k][0]`
            else:  # k is already placed; check/extend its surroundings
                ic = s.find(k)
                if ic == 0:  # k sits at the left end of the row
                    if len(d[k]) == 2 and s[1] == d[k][0]:
                        # Second neighbour must still be unplaced; prepend it.
                        if d[k][1] in s:
                            f = False
                        s = d[k][1] + s
                    elif len(d[k]) == 2 and s[1] == d[k][1]:
                        if d[k][0] in s:
                            f = False
                        s = d[k][0] + s
                    elif len(d[k]) == 2:
                        # The key next to k is neither of its required neighbours.
                        f = False
                    elif len(d[k]) == 1 and s[1] == d[k][0]:
                        pass  # single neighbour already adjacent; nothing to do
                    else:
                        f = False
                elif ic == (len(s) - 1):  # k at the right end (mirror of the above)
                    if len(d[k]) == 2 and s[len(s) - 2] == d[k][0]:
                        if d[k][1] in s:
                            f = False
                        s = s + d[k][1]
                    elif len(d[k]) == 2 and s[len(s) - 2] == d[k][1]:
                        if d[k][0] in s:
                            f = False
                        s = s + d[k][0]
                    elif len(d[k]) == 2:
                        f = False
                    elif len(d[k]) == 1 and s[len(s) - 2] == d[k][0]:
                        pass
                    else:
                        f = False
                else:
                    # k is somewhere in the middle: both of its row neighbours
                    # must be exactly the required neighbours.
                    if s[ic - 1] not in d[k] or s[ic + 1] not in d[k]:
                        f = False
        # STEP 3: append every letter of the alphabet that is still unplaced.
        if f:
            list_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                          'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
            for c in list_lower:
                if c not in s:
                    s = s + c
            print("YES")
            print(s)
        else:
            print("NO")
class TestClass(unittest.TestCase):
    """I/O-level tests for resolve(), driven through stdin/stdout capture."""

    def assertIO(self, input, output):
        # `input`/`output` shadow builtins; harmless here since neither
        # builtin is referenced in this method.
        stdout, stdin = sys.stdout, sys.stdin
        # Feed `input` to resolve() and capture everything it prints.
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]  # drop the trailing newline before comparing
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_input_1(self):
        # NOTE(review): the expected keyboards below match the Codeforces
        # sample output; the judge accepts *any* valid layout, so this
        # solution's own output may legitimately differ -- confirm before
        # treating a failure here as a bug in resolve().
        print("test_input_1")
        input = """5
ababa
codedoca
abcda
zxzytyz
abcdefghijklmnopqrstuvwxyza"""
        output = """YES
bacdefghijklmnopqrstuvwxyz
YES
edocabfghijklmnpqrstuvwxyz
NO
YES
xzytabcdefghijklmnopqrsuvw
NO"""
        self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"glenda.kanai@gmail.com"
] | glenda.kanai@gmail.com |
0b3c6f4f9fd250ff776a7798baaea5f2b0d80fcc | 60b704673152dfa5130405ce2a318b710fc3b120 | /wrappers/arlexecute/simulation/testing_support.py | 58f5c5bf0e5ccc0c1fe13b471934c2d5a6963bbc | [
"Apache-2.0"
] | permissive | rstofi/algorithm-reference-library | 02b8e6735141fbbc1941cef2f36c8ed7ef2c3e38 | 03415e18ea55afc54eb9534dcd0ca2c7a4b0020a | refs/heads/master | 2020-04-05T20:08:40.043608 | 2019-11-22T09:16:48 | 2019-11-22T09:16:48 | 157,166,061 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py | """
Functions that aid testing in various ways.
"""
from processing_components.simulation.testing_support import create_test_image
from processing_components.simulation.testing_support import create_test_image_from_s3
from processing_components.simulation.testing_support import create_low_test_image_from_gleam
from processing_components.simulation.testing_support import create_low_test_skymodel_from_gleam
from processing_components.simulation.testing_support import create_low_test_skycomponents_from_gleam
from processing_components.simulation.testing_support import create_test_skycomponents_from_s3
from processing_components.simulation.testing_support import replicate_image
from processing_components.simulation.testing_support import create_blockvisibility_iterator
from processing_components.simulation.testing_support import simulate_gaintable
from processing_components.simulation.testing_support import simulate_pointingtable
from processing_components.simulation.testing_support import simulate_pointingtable_from_timeseries
from processing_components.simulation.testing_support import ingest_unittest_visibility
from processing_components.simulation.testing_support import create_unittest_components
from processing_components.simulation.testing_support import create_unittest_model
from processing_components.simulation.testing_support import insert_unittest_errors | [
"realtimcornwell@gmail.com"
] | realtimcornwell@gmail.com |
3c0dc092ed3f1e25bb2a30dd063ff2039cc1f90e | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/fv/rtshardedregisterupdates.py | 54620b697d7fe65c493e10a884b52b49d7a818ec | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,386 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RtShardedRegisterUpdates(Mo):
    """Generated managed-object model for the fv:RtShardedRegisterUpdates
    relationship, created under fv:ShardedAREpPUpd.

    (Original generator note: "Mo doc not defined in techpub!!!" -- this file
    is machine-generated; keep hand edits to comments only.)
    """
    # Class-level meta shared by all instances: maps this relationship class
    # to its target class cobra.model.fv.ShardedAREpPUpd.
    meta = TargetRelationMeta("cobra.model.fv.RtShardedRegisterUpdates", "cobra.model.fv.ShardedAREpPUpd")
    meta.moClassName = "fvRtShardedRegisterUpdates"
    meta.rnFormat = "rtshardedRegisterUpdates"  # fixed RN: no naming properties
    meta.category = MoCategory.RELATIONSHIP_FROM_LOCAL
    meta.label = "Sharded AREpP Updates"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    # System-managed object: not configurable, not deletable, not a context root.
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    meta.parentClasses.add("cobra.model.fv.ShardedAREpPUpd")
    meta.superClasses.add("cobra.model.reln.From")
    meta.superClasses.add("cobra.model.reln.Inst")
    meta.rnPrefixes = [
        ('rtshardedRegisterUpdates', False),
    ]
    # --- property: childAction ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    # --- property: dn (distinguished name; fixed at creation) ---
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # --- property: lcOwn (allowed values: local/policy/replica/resolveOnBehalf/implicit) ---
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    # --- property: modTs (modification timestamp; defaults to "never") ---
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    # --- property: rn (relative name; fixed at creation) ---
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    # --- property: status (created/modified/deleted) ---
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    # --- property: tCl (target class; defaults to fvShardedAREpPUpd) ---
    prop = PropMeta("str", "tCl", "tCl", 23237, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 7867
    prop.defaultValueStr = "fvShardedAREpPUpd"
    prop._addConstant("fvShardedAREpPUpd", None, 7867)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)
    # --- property: tDn (target distinguished name) ---
    prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tDn", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # The RN has no naming properties (see rnFormat), hence no naming values.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
474b5eb47ebc4029a4d155c60b3998250e41f214 | 81ec35443bc2567118aece66254c021e73f960d1 | /python3/10.8.dates_and_times.py | 5cdf7b5a8067e22dcf2ab6a0481b3b184e4d981b | [] | no_license | folkol/tutorials | 95f1d641843cc26c04a79f74270721c7de4ac628 | 962b0fd89dac244e7f9dcb03773a25d96413fb0b | refs/heads/master | 2023-08-17T18:50:18.358911 | 2023-08-02T20:46:53 | 2023-08-02T20:47:35 | 66,833,956 | 0 | 0 | null | 2023-09-05T03:40:46 | 2016-08-29T10:26:01 | JavaScript | UTF-8 | Python | false | false | 331 | py | from datetime import date
# Tutorial: basic datetime.date usage.  The literal outputs in the comments
# were captured on 2017-06-04; date.today() makes every run differ.
now = date.today()
print(now)  # e.g. 2017-06-04
print(repr(now))  # e.g. datetime.date(2017, 6, 4)
# strftime with a custom format (fixes the original "say" -> "day" typo).
s = now.strftime('%m-%d-%y. %d %b %Y is a %A on the %d day of %B')
print(s)  # e.g. 06-04-17. 04 Jun 2017 is a Sunday on the 04 day of June
birthday = date(1980, 11, 2)
# Subtracting two dates yields a datetime.timedelta.
age = now - birthday
print(age.days)  # e.g. 13363 (grows by one every day)
| [
"mattias4@kth.se"
] | mattias4@kth.se |
ee00de44f4a031e1d7bf9de64e82c1f55cbf8028 | f22ca9aecda111a019502b462ce6772cb22d9425 | /test/test_cart_warehouse.py | 9147d67572c24863d56bc019a5bda0aaabd271d4 | [] | no_license | sivanv-unbxd/a2c-sdk-pim | cac05bc6335ddc3c4121d43e2dc476a6fec14965 | 51a07a0b7f90d74569ad14b47b174da7ac1fc374 | refs/heads/main | 2023-05-29T05:45:32.279821 | 2021-06-09T03:52:11 | 2021-06-09T03:52:11 | 375,218,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | # coding: utf-8
"""
Swagger API2Cart
API2Cart # noqa: E501
OpenAPI spec version: 1.1
Contact: contact@api2cart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.cart_warehouse import CartWarehouse # noqa: E501
from swagger_client.rest import ApiException
class TestCartWarehouse(unittest.TestCase):
    """Unit-test stubs for the generated ``CartWarehouse`` model."""

    def setUp(self):
        """No per-test fixtures are required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testCartWarehouse(self):
        """Placeholder: ``CartWarehouse`` construction is not exercised yet."""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.cart_warehouse.CartWarehouse() # noqa: E501
if __name__ == '__main__':
unittest.main()
| [
"sivanv@unbxd.com"
] | sivanv@unbxd.com |
014f19f93ff2542d81ce256e5af79c1fc0117b20 | 8981902427dc577228dfd5611c6afe86c3e2e9e2 | /dsmr_mqtt/services.py | 6a268e79b229fa528b8db0a29c46449e4d2c96f9 | [] | no_license | genie137/dsmr-reader | 5515f4f92bb05bcf00f0e8a0fbd1a018d408950b | 4d934b4838cb2de4a66ff193f4f3095e9beecd99 | refs/heads/master | 2020-03-21T18:14:05.182137 | 2018-06-12T14:54:55 | 2018-06-12T14:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,411 | py | import configparser
import logging
import json
from django.core import serializers
from django.utils import timezone
import paho.mqtt.publish as publish
from dsmr_mqtt.models.settings import broker, day_totals, telegram, meter_statistics
from dsmr_consumption.models.consumption import ElectricityConsumption
from dsmr_datalogger.models.statistics import MeterStatistics
import dsmr_consumption.services
logger = logging.getLogger('dsmrreader')
def get_broker_configuration():
    """Build the keyword arguments expected by paho.mqtt's publish helpers.

    Reads the singleton broker settings and returns a dict holding
    ``hostname``, ``port``, ``client_id`` and ``auth``.  ``auth`` is ``None``
    unless both a username and a password are configured.
    """
    broker_settings = broker.MQTTBrokerSettings.get_solo()

    credentials = None
    if broker_settings.username and broker_settings.password:
        credentials = {
            'username': broker_settings.username,
            'password': broker_settings.password,
        }

    return {
        'hostname': broker_settings.hostname,
        'port': broker_settings.port,
        'client_id': broker_settings.client_id,
        'auth': credentials,
    }
def publish_raw_dsmr_telegram(data):
    """Publish a raw DSMR telegram string to the broker, when enabled."""
    raw_settings = telegram.RawTelegramMQTTSettings.get_solo()

    if not raw_settings.enabled:
        return

    conf = get_broker_configuration()

    try:
        publish.single(topic=raw_settings.topic, payload=data, **conf)
    except ValueError as error:
        logger.error('MQTT publish_raw_dsmr_telegram() | {}'.format(error))
def publish_json_dsmr_reading(reading):
    """Publish a DSMR reading as one JSON-formatted MQTT message, when enabled.

    The user-configured INI text in the settings maps model field names to
    the JSON keys used in the published payload; unmapped fields are skipped.
    """
    json_settings = telegram.JSONTelegramMQTTSettings.get_solo()

    if not json_settings.enabled:
        return

    parser = configparser.ConfigParser()
    parser.read_string(json_settings.formatting)
    mapping = parser['mapping']

    payload = {
        mapping[field]: value
        for field, value in reading.__dict__.items()
        if field in mapping
    }
    serialized = json.dumps(payload, cls=serializers.json.DjangoJSONEncoder)

    conf = get_broker_configuration()

    try:
        publish.single(topic=json_settings.topic, payload=serialized, **conf)
    except ValueError as error:
        logger.error('MQTT publish_json_dsmr_reading() | {}'.format(error))
def publish_split_topic_dsmr_reading(reading):
    """Publish a DSMR reading as one MQTT message per mapped field, when enabled."""
    split_topic_settings = telegram.SplitTopicTelegramMQTTSettings.get_solo()

    if not split_topic_settings.enabled:
        return

    # The user-configured INI text maps model field names to MQTT topics.
    parser = configparser.ConfigParser()
    parser.read_string(split_topic_settings.formatting)
    topic_mapping = parser['mapping']

    # Serialize through Django to obtain JSON-safe values, then re-add the PK.
    serialized = json.loads(serializers.serialize('json', [reading]))
    reading_fields = dict(serialized[0]['fields'].items())
    reading_fields['id'] = serialized[0]['pk']

    mqtt_messages = [
        {'topic': topic_mapping[field], 'payload': value}
        for field, value in reading_fields.items()
        if field in topic_mapping
    ]

    conf = get_broker_configuration()

    try:
        publish.multiple(msgs=mqtt_messages, **conf)
    except ValueError as error:
        logger.error('MQTT publish_split_topic_dsmr_reading() | {}'.format(error))
def publish_day_totals():
    """ Publishes day totals to a broker, if set and enabled. """
    json_settings = day_totals.JSONDayTotalsMQTTSettings.get_solo()
    split_topic_settings = day_totals.SplitTopicDayTotalsMQTTSettings.get_solo()

    # Nothing to do when both output formats are disabled.
    if not json_settings.enabled and not split_topic_settings.enabled:
        return

    try:
        # Most recent electricity reading determines which day's totals we publish.
        latest_electricity = ElectricityConsumption.objects.all().order_by('-read_at')[0]
    except IndexError:
        # Don't even bother when no data available.
        return

    day_consumption = dsmr_consumption.services.day_consumption(
        day=timezone.localtime(latest_electricity.read_at).date()
    )

    # Collect messages for every enabled format and publish them in one batch.
    mqtt_messages = []

    if json_settings.enabled:
        mqtt_messages += day_totals_as_json(day_consumption, json_settings)

    if split_topic_settings.enabled:
        mqtt_messages += day_totals_per_topic(day_consumption, split_topic_settings)

    broker_kwargs = get_broker_configuration()

    try:
        publish.multiple(msgs=mqtt_messages, **broker_kwargs)
    except ValueError as error:
        logger.error('MQTT publish_day_totals() | {}'.format(error))
def day_totals_as_json(day_consumption, json_settings):
    """Render the day-consumption dict as a single JSON MQTT message.

    Only keys present in the user-configured ``[mapping]`` section are kept,
    each renamed to its mapped JSON field name.  Returns a one-element list
    so it can be concatenated with other message lists before publishing.
    """
    parser = configparser.ConfigParser()
    parser.read_string(json_settings.formatting)
    mapping = parser['mapping']

    payload = {
        mapping[field]: value
        for field, value in day_consumption.items()
        if field in mapping
    }
    serialized = json.dumps(payload, cls=serializers.json.DjangoJSONEncoder)

    return [{
        'topic': json_settings.topic,
        'payload': serialized,
    }]
def day_totals_per_topic(day_consumption, split_topic_settings):
    """Convert the day-consumption dict into one MQTT message per mapped field.

    Fields absent from the user-configured ``[mapping]`` section are skipped;
    values are stringified for the MQTT payload.
    """
    parser = configparser.ConfigParser()
    parser.read_string(split_topic_settings.formatting)
    topic_mapping = parser['mapping']

    return [
        {'topic': topic_mapping[field], 'payload': str(value)}
        for field, value in day_consumption.items()
        if field in topic_mapping
    ]
def publish_split_topic_meter_statistics():
    """ Publishes meter statistics to a broker, formatted in a separate topic per field name, if set and enabled. """
    split_topic_settings = meter_statistics.SplitTopicMeterStatisticsMQTTSettings.get_solo()

    if not split_topic_settings.enabled:
        return

    # User specified formatting.
    config_parser = configparser.ConfigParser()
    config_parser.read_string(split_topic_settings.formatting)
    topic_mapping = config_parser['mapping']

    # NOTE(review): from here on this mirrors publish_split_topic_dsmr_reading()
    # almost line for line; consider extracting a shared helper.
    mqtt_messages = []
    # Serialize through Django to obtain JSON-safe values, then re-add the PK.
    serialized_reading = json.loads(serializers.serialize('json', [MeterStatistics.get_solo()]))
    reading_fields = dict(serialized_reading[0]['fields'].items())
    reading_fields['id'] = serialized_reading[0]['pk']

    # Copy all fields described in the mapping.
    for k, v in reading_fields.items():
        if k not in topic_mapping:
            continue

        mqtt_messages.append({
            'topic': topic_mapping[k],
            'payload': v,
        })

    broker_kwargs = get_broker_configuration()

    try:
        publish.multiple(msgs=mqtt_messages, **broker_kwargs)
    except ValueError as error:
        logger.error('MQTT publish_split_topic_meter_statistics() | {}'.format(error))
| [
"github@dennissiemensma.nl"
] | github@dennissiemensma.nl |
6e26e8d19c926c36afe3f865217af8c274fafe78 | 3697d04e8daa01e880f8078bc38426eb23389b90 | /test/test_inline_response2003_division_house.py | c10b49eb99d35d617188a9deabb9f6597d8c3b51 | [] | no_license | Irishsmurf/OireachtasAPI | 3e4ed3b6a1a0fd815cc929f16af0b3ef39d76e13 | 979d354d39cc2957c4009c62ef205215ae8ba123 | refs/heads/master | 2023-08-02T14:56:29.951977 | 2020-04-13T18:33:56 | 2020-04-13T18:33:56 | 255,411,309 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,732 | py | # coding: utf-8
"""
Houses of the Oireachtas Open Data APIs
The Houses of the Oireachtas is providing these APIs to allow our datasets to be retrieved and reused as widely as possible. They are intended to be used in conjunction with https://data.oireachtas.ie, from where our datasets can be accessed directly. By using the APIs, users can make metadata queries to identify the specific data they require. New data are available through the API as soon as they are published. Currently, https://data.oireachtas.ie contains data in XML format from the Official Report of the Houses of the Oireachtas (the \"debates\") and replies to Parliamentary Questions in XML files complying with the [Akoma Ntoso](http://akomantoso.org) schema, as well data in PDF format for Bills, Acts and other documents published by the Houses of the Oireachtas. Files can be retrieved from https://data.oireachtas.ie by adding the URI fragment contained in the \"formats\" fields of the JSON documents returned by these APIs. At the moment only PDF and XML files are available directly from https://data.oireachtas.ie, but this will become the endpoint for direct access of all \"uri\" fields in the data queried through https://api.oireachtas.ie. We will also be making bulk downloads available through https://data.oireachtas.ie. Please note the APIs are a work in progress. We are working on expanding the range of datasets we publish, and we are interested in hearing about how to make these APIs more useful and wide ranging. For these reasons, we welcome any feedback, suggestions and user stories to open.data@oireachtas.ie Data published through these APIs are made available under the [Oireachtas (Open Data) PSI Licence](https://beta.oireachtas.ie/en/open-data/license/) # noqa: E501
OpenAPI spec version: 1.0
Contact: open.data@oireachtas.ie
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import oireachtas_api
from oireachtas_api.models.inline_response2003_division_house import InlineResponse2003DivisionHouse # noqa: E501
from oireachtas_api.rest import ApiException
class TestInlineResponse2003DivisionHouse(unittest.TestCase):
    """Unit-test stubs for ``InlineResponse2003DivisionHouse``."""

    def setUp(self):
        """No per-test fixtures are required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testInlineResponse2003DivisionHouse(self):
        """Placeholder: model construction is not exercised yet."""
        # FIXME: construct object with mandatory attributes with example values
        # model = oireachtas_api.models.inline_response2003_division_house.InlineResponse2003DivisionHouse()  # noqa: E501
if __name__ == '__main__':
unittest.main()
| [
"dave@paddez.com"
] | dave@paddez.com |
79534b04cc124bc0a45a563f8ce019c809409d7b | 88994e2e840a70ec702cee09e1a13813aa6f800c | /tests/models/observations/test_observations_input_files.py | bccc95a62ae1ed7dbee2e604e345dec01f022f35 | [] | no_license | Clinical-Genomics/cg | 1e9eb0852f742d555a48e8696914ebe177f7d436 | d2ec6d25b577dd6938bbf92317aeff1d6b3c5b08 | refs/heads/master | 2023-09-01T02:04:04.229120 | 2023-08-31T13:50:31 | 2023-08-31T13:50:31 | 82,567,026 | 19 | 8 | null | 2023-09-14T15:24:13 | 2017-02-20T14:29:43 | Python | UTF-8 | Python | false | false | 2,578 | py | """Test ObservationsInputFiles pydantic model behaviour."""
from pathlib import Path
import pytest
from pydantic import ValidationError
from cg.models.observations.input_files import (
MipDNAObservationsInputFiles,
BalsamicObservationsInputFiles,
)
def test_instantiate_input_files(observations_input_files_raw: dict):
    """MipDNAObservationsInputFiles should accept a complete raw file mapping."""
    # GIVEN a dictionary with the basic input files

    # WHEN building the model from the raw dictionary
    model = MipDNAObservationsInputFiles(**observations_input_files_raw)

    # THEN a MipDNAObservationsInputFiles instance is returned
    assert isinstance(model, MipDNAObservationsInputFiles)
def test_instantiate_input_files_missing_field(
    observations_input_files_raw: dict, file_does_not_exist: Path
):
    """MipDNAObservationsInputFiles should reject a path that does not exist."""
    # GIVEN raw input files where one entry points to a missing file
    raw_files = observations_input_files_raw
    raw_files["snv_vcf_path"] = file_does_not_exist

    # WHEN instantiating the model THEN validation fails
    with pytest.raises(ValidationError):
        MipDNAObservationsInputFiles(**raw_files)
def test_instantiate_balsamic_input_files(balsamic_observations_input_files_raw: dict):
    """BalsamicObservationsInputFiles should accept a complete raw file mapping."""
    # GIVEN balsamic input files

    # WHEN building the model from the raw dictionary
    model = BalsamicObservationsInputFiles(**balsamic_observations_input_files_raw)

    # THEN a BalsamicObservationsInputFiles instance is returned
    assert isinstance(model, BalsamicObservationsInputFiles)
def test_instantiate_balsamic_input_files_missing_field(
    balsamic_observations_input_files_raw: dict, file_does_not_exist: Path
):
    """BalsamicObservationsInputFiles should reject a path that does not exist."""
    # GIVEN balsamic raw input files where one entry points to a missing file
    raw_files = balsamic_observations_input_files_raw
    raw_files["snv_germline_vcf_path"] = file_does_not_exist

    # WHEN instantiating the model THEN validation fails
    with pytest.raises(ValidationError):
        BalsamicObservationsInputFiles(**raw_files)
| [
"noreply@github.com"
] | Clinical-Genomics.noreply@github.com |
295ec14910d6eed4f63d65c9bee28ee5a0f09e02 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03101/s233463572.py | 458eadd851ed568091bfe75e21ec4e226498ce9a | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | HW =[list(map(int,input().split())) for i in range(2)]
print((HW[0][0]-HW[1][0])*(HW[0][1]-HW[1][1])) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f0d242c7365600fb0451f8713d71fc88924bdb39 | 0f20de2ffdac733e5c8a4116c820b1673081cf4c | /docs/source/conf.py | a0f2f5a6ff259c37eb9f4353957ab2566e6c4b3e | [
"BSD-2-Clause"
] | permissive | hdknr/flier | f13a0620bb36e8021e30ce7f1ac0337091ddf00e | eca75b972ed91c3107b5c6007366caf93313d8e8 | refs/heads/master | 2020-04-03T21:29:50.359464 | 2019-05-31T09:28:40 | 2019-05-31T09:28:40 | 20,241,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,530 | py | # -*- coding: utf-8 -*-
#
# flier documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 13 02:34:06 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'flier'
copyright = u'2015, HDKNR.COM'
author = u'HDKNR.COM'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ja'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'flierdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'flier.tex', u'flier Documentation',
u'HDKNR.COM', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flier', u'flier Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'flier', u'flier Documentation',
author, 'flier', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
######################################################################
# Project-specific overrides (appended after the sphinx-quickstart
# boilerplate above).

# Excludes: reST sources that must not be picked up by the build.
exclude_patterns = [
    'models/*.models.*.rst',
    '*/include.*.rst',
]
# NOTE(review): re-assigns the same value already set earlier in this file.
todo_include_todos = True
# Django Project: put the sample project on sys.path and point Django at
# its settings module so autodoc can import the app's modules.
SRC_DIR = os.path.dirname(os.path.abspath(__file__))
DOC_DIR = os.path.dirname(SRC_DIR)
PRJ_DIR = os.path.join(os.path.dirname(DOC_DIR), 'sample')
sys.path.insert(0, PRJ_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'app.settings'
# ReadTheDocs Theme
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# http://docs.sphinx-users.jp/theming.html
# NOTE(review): this overrides the sphinx_rtd_theme assignment just above
# (and 'alabaster' earlier in the file) -- 'bizstyle' is the theme in effect.
html_theme = 'bizstyle'
# blockdiag diagrams (sphinxcontrib-blockdiag) rendered with an IPA
# Japanese TrueType font.
extensions += ['sphinxcontrib.blockdiag']
blockdiag_fontpath = '/usr/share/fonts/truetype/IPAfont00303/ipagp.ttf'
def setup(app):
    """Sphinx extension entry point: enable Django-aware docstring processing.

    ``get_wsgi_application()`` initializes Django first; ``app.sphinx``
    (and the models it imports) presumably requires that setup to have
    run, so keep the statement order as-is.
    """
    from django.core.wsgi import get_wsgi_application
    get_wsgi_application()
    from app.sphinx import process_docstring
    # Register the docstring processor with sphinx
    app.connect('autodoc-process-docstring', process_docstring)
| [
"gmail@hdknr.com"
] | gmail@hdknr.com |
020d409514f60d8be97a9d22ef159566ead914e9 | 11b5de6bc38f1cf415ee2b743ce6e7da70e8ede3 | /bin/split-seqs-by-id | dadb98424ce1875063d78de427e1ce7f1f3ff1ca | [
"MIT"
] | permissive | mkcor/bioinformatics-hacks | b21c6e3e3de4a1e28e1b2da754bf186a3faeb088 | 8f0894b8a0cc5595c7c1605ab3551a16e65d0f06 | refs/heads/master | 2020-07-13T22:48:17.092651 | 2019-07-24T22:38:58 | 2019-07-24T22:38:58 | 205,172,284 | 0 | 0 | NOASSERTION | 2019-08-29T13:38:32 | 2019-08-29T13:38:31 | null | UTF-8 | Python | false | false | 917 | #!/usr/bin/env python
import argparse
import logging
from Bio import SeqIO
def parse_args():
    """Parse command-line arguments.

    Returns an ``argparse.Namespace`` with ``log``, ``output`` and
    ``input`` attributes, each defaulting to the corresponding standard
    stream device.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--log", default="/dev/stderr", help="log file (default=stderr)"
    )
    # NOTE(review): --output is accepted but main() never reads it; every
    # record is written to '<id>.fasta' instead -- confirm intent.
    parser.add_argument("--output", default="/dev/stdout")
    parser.add_argument("--input", default="/dev/stdin")
    return parser.parse_args()
def main():
    """Split a multi-record FASTA stream into one file per sequence ID.

    Reads FASTA records from ``--input`` (default: stdin) and writes each
    record to ``<record id>.fasta`` in the current working directory,
    overwriting any existing file of that name.
    """
    args = parse_args()
    logging.basicConfig(filename=args.log, level=logging.INFO)
    with open(args.input) as handle:
        # SeqIO.parse streams records lazily, so arbitrarily large inputs
        # are handled one record at a time.
        records = SeqIO.parse(handle, "fasta")
        for record in records:
            out_file = "{}.fasta".format(record.id)
            with open(out_file, "w") as output:
                output.write(record.format("fasta"))
if __name__ == "__main__":
    main()
| [
"harekrishna@gmail.com"
] | harekrishna@gmail.com | |
130fb9e238720cf124db67fb1ce0d4358ee70e22 | b4cf3438011c9521561143e677736c611ff19a0c | /setup.py | 41f10d1828fa03e1a62e28eb8ac19b63cc45e852 | [] | no_license | BUCT-Vision/boxx | 3e5c24af20c06d4943dc04859e6cbfb577fe8a48 | 3d405c9ad744d2ff9f6f5d9efb1e31962474565b | refs/heads/master | 2020-03-18T17:35:18.573106 | 2018-09-18T02:49:10 | 2018-09-18T02:49:10 | 135,037,392 | 2 | 0 | null | 2018-09-18T02:49:11 | 2018-05-27T10:44:44 | Python | UTF-8 | Python | false | false | 6,981 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
from datetime import date
from setuptools import setup, find_packages
# --- import your package ---
#import boxx as package
packageName = 'boxx'
# Path to the package's metadata source: <pkg>/__init__.py for a
# directory-style package, otherwise the single-module file <pkg>.py.
pyPath = '%s/__init__.py'%packageName if os.path.isdir(packageName) else '%s.py'%packageName
with open(pyPath) as f:
    lines = f.readlines()
# Keep only the top-level dunder assignments ("__version__ = ...") so the
# package metadata can be read without importing (and executing) the package.
code = ''.join(filter(lambda l: l.startswith('__') and '=' in l, lines))
class Pack():
    # Empty namespace object standing in for the real package.
    pass
package = Pack()
package.__name__ = packageName
# Execute the extracted assignments with the stub instance's __dict__ as
# globals; the dunders then show up as attributes on `package`.
exec(code, package.__dict__)
if __name__ == "__main__":
# --- Automatically generate setup parameters ---
# Your package name
PKG_NAME = package.__name__
# Your GitHub user name
try:
GITHUB_USERNAME = package.__github_username__
except:
GITHUB_USERNAME = "Unknown-Github-Username"
# Short description will be the description on PyPI
try:
SHORT_DESCRIPTION = package.__short_description__ # GitHub Short Description
except:
print(
"'__short_description__' not found in '%s.__init__.py'!" % PKG_NAME)
SHORT_DESCRIPTION = "No short description!"
# Long description will be the body of content on PyPI page
try:
LONG_DESCRIPTION = open("README.md", "rb").read().decode("utf-8")
except:
LONG_DESCRIPTION = "No long description!"
# Version number, VERY IMPORTANT!
VERSION = package.__version__
# Author and Maintainer
try:
AUTHOR = package.__author__
except:
AUTHOR = "Unknown"
try:
AUTHOR_EMAIL = package.__author_email__
except:
AUTHOR_EMAIL = None
try:
MAINTAINER = package.__maintainer__
except:
MAINTAINER = "Unknown"
try:
MAINTAINER_EMAIL = package.__maintainer_email__
except:
MAINTAINER_EMAIL = None
    # Defaults: filled in below depending on the package layout detected.
    PACKAGES, INCLUDE_PACKAGE_DATA, PACKAGE_DATA, PY_MODULES = (
        None, None, None, None,
    )
    # It's a directory style package
    # NOTE(review): __file__[:-8] strips the trailing "setup.py" (8 chars)
    # to get the project directory -- fragile if the script is renamed or
    # invoked through a different path; confirm before reusing.
    if os.path.exists(__file__[:-8] + PKG_NAME):
        # Include all sub packages in package directory
        PACKAGES = [PKG_NAME] + ["%s.%s" % (PKG_NAME, i)
                                 for i in find_packages(PKG_NAME)]
        # Include everything in package directory
        INCLUDE_PACKAGE_DATA = None
        PACKAGE_DATA = {
            "": ["*.*"],
        }
    # It's a single script style package
    elif os.path.exists(__file__[:-8] + PKG_NAME + ".py"):
        PY_MODULES = [PKG_NAME, ]
    # The project directory name is the GitHub repository name
    repository_name = os.path.basename(os.path.dirname(__file__))
    # Project Url
    URL = "https://github.com/{0}/{1}".format(GITHUB_USERNAME, repository_name)
    # Use todays date as GitHub release tag
    github_release_tag = str(date.today())
    # Source code download url
    DOWNLOAD_URL = "https://github.com/{0}/{1}/tarball/{2}".format(
        GITHUB_USERNAME, repository_name, github_release_tag)
try:
LICENSE = package.__license__
except:
print("'__license__' not found in '%s.__init__.py'!" % PKG_NAME)
LICENSE = ""
PLATFORMS = [
"Windows",
"MacOS",
"Unix",
]
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
# Read requirements.txt, ignore comments
try:
REQUIRES = list()
f = open("requirements.txt", "rb")
for line in f.read().decode("utf-8").split("\n"):
line = line.strip()
if "#" in line:
line = line[:line.find("#")].strip()
if line:
REQUIRES.append(line)
except:
print("'requirements.txt' not found!")
REQUIRES = list()
# from boxx import *
# setup = dicto
# tree-setup(
setup(
name=PKG_NAME,
description=SHORT_DESCRIPTION,
# long_description=LONG_DESCRIPTION,
long_description=SHORT_DESCRIPTION+'\nMore information on github: https://github.com/DIYer22/boxx',
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
packages=PACKAGES,
include_package_data=INCLUDE_PACKAGE_DATA,
# package_data=PACKAGE_DATA,
py_modules=PY_MODULES,
url='https://github.com/DIYer22/Box-X',
download_url='https://github.com/DIYer22/Box-X/archive/master.zip',
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
license=LICENSE,
install_requires=REQUIRES,
)
"""
Appendix
--------
::
Frequent used classifiers List = [
"Development Status :: 1 - Planning",
"Development Status :: 2 - Pre-Alpha",
"Development Status :: 3 - Alpha",
"Development Status :: 4 - Beta",
"Development Status :: 5 - Production/Stable",
"Development Status :: 6 - Mature",
"Development Status :: 7 - Inactive",
"Intended Audience :: Customer Service",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Financial and Insurance Industry",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Information Technology",
"Intended Audience :: Legal Industry",
"Intended Audience :: Manufacturing",
"Intended Audience :: Other Audience",
"Intended Audience :: Religion",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: BSD License",
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"License :: OSI Approved :: GNU General Public License (GPL)",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Natural Language :: English",
"Natural Language :: Chinese (Simplified)",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3 :: Only",
]
"""
| [
"ylxx@live.com"
] | ylxx@live.com |
f236269f4776ca93d30e415a57352e31115a6d08 | 63519b144e8b2d881c8f6e99b9c61aae5ab408ca | /resample_topo_file.py | ae26bfab26aea5740e8dc612baab9465dfbfedc0 | [] | no_license | kujaku11/sandbox_scripts | 667d260ef42c3fe90c9543e0a938fdb104368700 | 080003cdae3a14fec5178d3e7a854d142ef3948c | refs/heads/master | 2023-08-10T18:27:30.463398 | 2023-08-05T01:28:29 | 2023-08-05T01:28:29 | 75,033,125 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,579 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 15:06:46 2019
@author: jpeacock
"""
import numpy as np
fn = r"c:\Users\jpeacock\Documents\Geothermal\GabbsValley\gis\gv_topo.txt"
# Keep every `resample`-th sample along both axes.
resample = 6
with open(fn, "r") as fid:
    # Arc ASCII grid header: ncols, nrows, xllcorner, yllcorner,
    # cellsize, NODATA_value -- one "key value" pair per line.
    nx = int(fid.readline().split()[1].strip())
    ny = int(fid.readline().split()[1].strip())
    x = float(fid.readline().split()[1].strip())
    y = float(fid.readline().split()[1].strip())
    cell = float(fid.readline().split()[1].strip())
    nil = float(fid.readline().split()[1].strip())
    # `//` keeps the sizes integral on Python 3 (the original relied on
    # Python 2 integer `/`), and the builtin float dtype replaces
    # `np.float`, which was removed from recent NumPy releases.
    topo = np.zeros((nx // resample, ny // resample), dtype=float)
    for ii in range(ny // resample):
        # Take every `resample`-th column of this row.  A malformed row
        # raises ValueError naturally (the original caught it only to
        # re-raise a wrapped copy).
        line = fid.readline()
        topo[:, ii] = np.array(line.strip().split(), dtype=float)[::resample]
        # Skip the intermediate rows dropped by the resampling.
        for jj in range(resample - 1):
            fid.readline()
# Zero out NODATA cells.  NOTE(review): -9999 is hard-coded; presumably it
# should be the `nil` value read from the header -- confirm before changing.
topo[np.where(topo == -9999)] = 0
with open(fn[0:-4] + "_150m.txt", "w") as nfid:
    # Write a new Arc ASCII grid with the coarser cell size.
    header = []
    header.append("{0:14}{1:.0f}".format("ncols", topo.shape[0]))
    header.append("{0:14}{1:.0f}".format("nrows", topo.shape[1]))
    header.append("{0:14}{1:.11f}".format("xllcorner", x))
    header.append("{0:14}{1:.11f}".format("yllcorner", y))
    header.append("{0:14}{1:.11f}".format("cellsize", cell * resample))
    header.append("{0:14}{1:.0f}".format("NODATA_value", nil))
    nfid.write("\n".join(header))
    nfid.write("\n")
    for kk in range(topo.shape[1]):
        out = np.char.mod("%.6g", topo[:, kk])
        nfid.write(" ".join(out))
        nfid.write("\n")
| [
"peacock.jared@gmail.com"
] | peacock.jared@gmail.com |
c3dfaa0899e4dab4d82c33038a74506baebc221a | 560e212b000df60325d6a3cddd225aa4af69a8f8 | /authentication/models.py | bf9cc5520e5f043e3db7e6a26072d08777b76001 | [] | no_license | msrshahrukh100/Roba-Square-Website | c36964ee08536d6cfc1e2ced99c4cc61f5c03ace | 3cfcbfc47541ae387bef9fe1e06c4046131841ba | refs/heads/master | 2021-01-19T19:31:16.947352 | 2019-05-02T12:58:44 | 2019-05-02T12:58:44 | 88,420,401 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,840 | py | from django.db import models
from django.contrib.auth.models import User
from autoslug import AutoSlugField
from authentication.username import get_user_name
from django.dispatch.dispatcher import receiver
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
from sorl.thumbnail import ImageField
from django.core.cache import cache
from django.contrib.auth.signals import user_logged_in, user_logged_out
def clear_the_cache(sender, user, request, **kwargs):
    """Flush the cache whenever a user logs in or out.

    NOTE(review): this clears the *entire* Django cache, not just the
    entries for the user involved -- confirm that is intentional.
    """
    cache.clear()
# Wire the handler to both authentication signals.
user_logged_in.connect(clear_the_cache)
user_logged_out.connect(clear_the_cache)
# upload location for user profile pics
def upload_location_user(instance, filename):
    """Build the MEDIA-relative upload path for a user's profile picture.

    Files are stored under ``users/<user id>/<original filename>``.
    """
    owner_id = instance.user.id
    return "users/{0}/{1}".format(owner_id, filename)
# class for storing user information 1-1 qith the default user model
class UserInformation(models.Model) :
    """Per-user profile data and privacy toggles, 1-1 with ``auth.User``."""
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='user_information')
    # Uploaded profile picture; height/width are populated by the ImageField.
    change_profile_pic = ImageField(upload_to=upload_location_user,height_field='height_field',width_field='width_field',blank=True, null=True,default='default.jpg')
    height_field = models.IntegerField(default=0)
    width_field = models.IntegerField(default=0)
    # Free-text date of birth (not a DateField).
    date_of_birth = models.CharField(max_length=20,blank=True, null=True)
    phonenumber = models.CharField(max_length=15,blank=True, null=True)
    profession = models.CharField(max_length=100, blank=True, null=True)
    name_of_institute = models.CharField(max_length=200, blank=True, null=True)
    # Privacy flags controlling what is visible on the public profile page.
    showrecentlyviewed = models.BooleanField(default=True)
    showfollowers = models.BooleanField(default=True)
    showfollowing = models.BooleanField(default=True)
    showdob = models.BooleanField(default=True)
    # URL slug derived from the related user.
    slug = AutoSlugField(populate_from='user',unique=True)
    def __unicode__(self) :
        return str(self.user.username)
    class Meta :
        verbose_name = "User Information"
        verbose_name_plural = "User Information"
    def get_absolute_url(self):
        """Return the public profile URL for this user."""
        return reverse("social:viewuser", kwargs={"slug": self.slug})
    def get_image_url(self) :
        """Return the avatar: the social-account avatar URL when a social
        account exists, otherwise the uploaded profile picture.

        NOTE(review): the two branches return different types (a URL string
        vs. an ImageField file object), and the social account is queried
        twice -- confirm callers handle both.
        """
        if self.user.socialaccount_set.all().first() :
            return self.user.socialaccount_set.all().first().get_avatar_url()
        else :
            return self.user.user_information.change_profile_pic
@receiver(post_save, sender=User)
def UserInformationreceiver(sender, instance, **kwargs):
    """Ensure every saved ``User`` has a matching ``UserInformation`` row."""
    UserInformation.objects.get_or_create(user=instance)
class Addresses(models.Model) :
    """A delivery address belonging to a user (users may have several)."""
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='addresses')
    address = models.CharField(max_length=300)
    city = models.CharField(max_length=30)
    pincode = models.PositiveIntegerField()
    nearest_landmark = models.CharField(max_length=200, null=True, blank=True)
    def __unicode__(self) :
        return self.user.username
    def get_remove_url(self) :
        """Return the URL that deletes this address."""
        return reverse("authentication:removeaddress", kwargs={'id':self.id})
| [
"msr.concordfly@gmail.com"
] | msr.concordfly@gmail.com |
3575db317ab710ec595dfe6bf58cde5c8976f25f | f5807a07ad72be79d4626ce9fe4adbf6d9f32fd8 | /base.py | 93a3986b8ab2d67b417169c5971b26987241751e | [] | no_license | 15101538237ren/papers_collecting | 74ddeb708502bf62dfdd5fd734a515e6fd73986b | 0e9c4e24a8edac6f77f27f7b1b53ea2c9069f652 | refs/heads/master | 2018-11-04T18:47:01.170290 | 2018-08-26T22:32:17 | 2018-08-26T22:32:17 | 115,414,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib2, os
# Working directories for cached pickles and downloaded papers;
# created on import if absent.
pkl_dir = 'pkl'
papers_dir = 'papers'
for dir_path in [pkl_dir, papers_dir]:
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
# Publication years to crawl (2013..2017 inclusive).
paper_years = range(2013, 2018)
# HTTP proxy endpoint and request timeout (seconds) used by request_url.
ip = '10.138.232.71'
port = '80'
timeout = 20
def request_url(paper_collection_name, url):
    """Fetch ``url`` and return the response body (Python 2 / urllib2).

    For every collection except "icml" an HTTP proxy (module-level
    ``ip``/``port``) plus browser-like headers are installed globally via
    install_opener before the request is made; the request itself is
    issued for all collections, using the module-level ``timeout``.

    NOTE(review): returns the integer 0 on URLError instead of None/raising,
    so callers must truth-test the result.  Python 2 only (``except ..., e``
    and print-statement syntax).
    """
    if paper_collection_name != "icml":
        proxydict = {}
        proxydict['http'] = "http://%s:%s"%(ip, port)
        proxy_handler = urllib2.ProxyHandler(proxydict)
        opener = urllib2.build_opener(proxy_handler)
        # Spoofed browser headers (including a hard-coded analytics cookie).
        opener.addheaders = [('User-agent', 'Mozilla/5.0'), ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),('Connection','keep-alive'),('Cookie','_ga=GA1.2.1314251887.1513847038; _gat=1; _gid=GA1.2.129016361.1514334078')]
        urllib2.install_opener(opener)
    try:
        req = urllib2.Request(url)
        response = urllib2.urlopen(req,timeout=timeout)
        return response.read()
    except urllib2.URLError, e:
        print e.reason
        return 0
def schedule(a, b, c):
    """Report-hook for ``urlretrieve``-style downloads.

    a: number of data blocks downloaded so far
    b: size of one data block, in bytes
    c: total size of the remote file, in bytes

    Computes the download percentage (capped at 100) but does not display
    it -- the progress print is intentionally disabled.
    """
    per = min(100.0 * a * b / c, 100)
| [
"renhongleiz@126.com"
] | renhongleiz@126.com |
dab7bce199ef25196e9f4dfd86411d099ee56def | 080bbe77da955b3917435c25fc63b90b0f3c724e | /test/acquisition/test_input_constructors.py | 144ea489a484d666c00e808583655c1d2861ea97 | [
"MIT"
] | permissive | irinaespejo/botorch | 3d15d962ff0f5bb34fbd11b2eb7549db755af705 | e4dcf603fdaf83f0e5f8b9b392f943c89dfff7eb | refs/heads/master | 2023-07-11T18:02:11.853790 | 2021-08-19T15:57:21 | 2021-08-19T15:58:12 | 316,017,084 | 0 | 0 | MIT | 2020-11-25T18:02:11 | 2020-11-25T18:02:09 | null | UTF-8 | Python | false | false | 22,973 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from unittest import mock
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.analytic import (
# ConstrainedExpectedImprovement,
ExpectedImprovement,
NoisyExpectedImprovement,
PosteriorMean,
UpperConfidenceBound,
)
from botorch.acquisition.input_constructors import (
acqf_input_constructor,
get_acqf_input_constructor,
get_best_f_analytic,
get_best_f_mc,
)
from botorch.acquisition.monte_carlo import (
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from botorch.acquisition.multi_objective import (
ExpectedHypervolumeImprovement,
qExpectedHypervolumeImprovement,
qNoisyExpectedHypervolumeImprovement,
)
from botorch.acquisition.multi_objective.objective import (
IdentityAnalyticMultiOutputObjective,
IdentityMCMultiOutputObjective,
WeightedMCMultiOutputObjective,
)
from botorch.acquisition.multi_objective.utils import get_default_partitioning_alpha
from botorch.acquisition.objective import LinearMCObjective
from botorch.acquisition.objective import (
ScalarizedObjective,
)
from botorch.exceptions.errors import UnsupportedError
from botorch.sampling.samplers import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.constraints import get_outcome_constraint_transforms
from botorch.utils.containers import TrainingData
from botorch.utils.multi_objective.box_decompositions.non_dominated import (
FastNondominatedPartitioning,
NondominatedPartitioning,
)
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class DummyAcquisitionFunction(AcquisitionFunction):
    # Deliberately left without a registered input constructor so tests can
    # exercise the "not registered" error path.
    ...
class InputConstructorBaseTestCase:
    """Mixin building shared TrainingData fixtures for constructor tests.

    ``bd_td``:    single-output block-design data (3x2 X, 3x1 Y)
    ``bd_td_mo``: multi-output block-design data (3x2 X, 3x2 Y)
    ``nbd_td``:   non-block-design data (per-output X/Y lists)
    """

    def setUp(self):
        X = torch.rand(3, 2)
        Y = torch.rand(3, 1)
        self.bd_td = TrainingData.from_block_design(X=X, Y=Y)
        self.bd_td_mo = TrainingData.from_block_design(X=X, Y=torch.rand(3, 2))
        Xs = [torch.rand(2, 2), torch.rand(2, 2)]
        Ys = [torch.rand(2, 1), torch.rand(2, 1)]
        self.nbd_td = TrainingData(Xs=Xs, Ys=Ys)
class TestInputConstructorUtils(InputConstructorBaseTestCase, BotorchTestCase):
    """Tests for the ``get_best_f_*`` helper utilities."""

    def test_get_best_f_analytic(self):
        # Non-block designs are unsupported.
        with self.assertRaises(NotImplementedError):
            get_best_f_analytic(training_data=self.nbd_td)
        best_f = get_best_f_analytic(training_data=self.bd_td)
        best_f_expected = self.bd_td.Y.squeeze().max()
        self.assertEqual(best_f, best_f_expected)
        # Multi-output data requires an objective to scalarize it.
        with self.assertRaises(NotImplementedError):
            get_best_f_analytic(training_data=self.bd_td_mo)
        obj = ScalarizedObjective(weights=torch.rand(2))
        best_f = get_best_f_analytic(training_data=self.bd_td_mo, objective=obj)
        best_f_expected = obj.evaluate(self.bd_td_mo.Y).max()
        self.assertEqual(best_f, best_f_expected)

    def test_get_best_f_mc(self):
        with self.assertRaises(NotImplementedError):
            get_best_f_mc(training_data=self.nbd_td)
        best_f = get_best_f_mc(training_data=self.bd_td)
        best_f_expected = self.bd_td.Y.squeeze().max()
        self.assertEqual(best_f, best_f_expected)
        # Multi-output data without an MC objective is unsupported.
        with self.assertRaises(UnsupportedError):
            get_best_f_mc(training_data=self.bd_td_mo)
        obj = LinearMCObjective(weights=torch.rand(2))
        best_f = get_best_f_mc(training_data=self.bd_td_mo, objective=obj)
        best_f_expected = (self.bd_td_mo.Y @ obj.weights).max()
        self.assertEqual(best_f, best_f_expected)
class TestAnalyticAcquisitionFunctionInputConstructors(
    InputConstructorBaseTestCase, BotorchTestCase
):
    """Input-constructor tests for the analytic acquisition functions."""

    def test_acqf_input_constructor(self):
        # Unregistered AF classes must raise ...
        with self.assertRaises(RuntimeError) as e:
            get_acqf_input_constructor(DummyAcquisitionFunction)
            self.assertTrue("not registered" in str(e))
        # ... and registering a second constructor for the same AF must too.
        with self.assertRaises(ValueError) as e:
            @acqf_input_constructor(ExpectedImprovement)
            class NewAcquisitionFunction(AcquisitionFunction):
                ...
            self.assertTrue("duplicate" in str(e))

    def test_construct_inputs_analytic_base(self):
        c = get_acqf_input_constructor(PosteriorMean)
        mock_model = mock.Mock()
        kwargs = c(model=mock_model, training_data=self.bd_td)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        # An explicitly supplied objective is passed through untouched.
        mock_obj = mock.Mock()
        kwargs = c(model=mock_model, training_data=self.bd_td, objective=mock_obj)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertEqual(kwargs["objective"], mock_obj)

    def test_construct_inputs_best_f(self):
        c = get_acqf_input_constructor(ExpectedImprovement)
        mock_model = mock.Mock()
        # Default: best_f inferred from the training data.
        kwargs = c(model=mock_model, training_data=self.bd_td)
        best_f_expected = self.bd_td.Y.squeeze().max()
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertEqual(kwargs["best_f"], best_f_expected)
        self.assertTrue(kwargs["maximize"])
        # An explicit best_f overrides the inferred value.
        kwargs = c(model=mock_model, training_data=self.bd_td, best_f=0.1)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertEqual(kwargs["best_f"], 0.1)
        self.assertTrue(kwargs["maximize"])

    def test_construct_inputs_ucb(self):
        c = get_acqf_input_constructor(UpperConfidenceBound)
        mock_model = mock.Mock()
        # Defaults: beta=0.2, maximize=True.
        kwargs = c(model=mock_model, training_data=self.bd_td)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertEqual(kwargs["beta"], 0.2)
        self.assertTrue(kwargs["maximize"])
        kwargs = c(model=mock_model, training_data=self.bd_td, beta=0.1, maximize=False)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertEqual(kwargs["beta"], 0.1)
        self.assertFalse(kwargs["maximize"])

    # def test_construct_inputs_constrained_ei(self):
    #     c = get_acqf_input_constructor(ConstrainedExpectedImprovement)
    #     mock_model = mock.Mock()

    def test_construct_inputs_noisy_ei(self):
        c = get_acqf_input_constructor(NoisyExpectedImprovement)
        mock_model = mock.Mock()
        # Defaults: num_fantasies=20, maximize=True, X_observed from data.
        kwargs = c(model=mock_model, training_data=self.bd_td)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertTrue(torch.equal(kwargs["X_observed"], self.bd_td.X))
        self.assertEqual(kwargs["num_fantasies"], 20)
        self.assertTrue(kwargs["maximize"])
        kwargs = c(
            model=mock_model, training_data=self.bd_td, num_fantasies=10, maximize=False
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertTrue(torch.equal(kwargs["X_observed"], self.bd_td.X))
        self.assertEqual(kwargs["num_fantasies"], 10)
        self.assertFalse(kwargs["maximize"])
        # Non-block designs are rejected.
        with self.assertRaisesRegex(NotImplementedError, "only block designs"):
            c(model=mock_model, training_data=self.nbd_td)
class TestMCAcquisitionFunctionInputConstructors(
    InputConstructorBaseTestCase, BotorchTestCase
):
    """Input-constructor tests for the Monte Carlo acquisition functions."""

    def test_construct_inputs_mc_base(self):
        c = get_acqf_input_constructor(qSimpleRegret)
        mock_model = mock.Mock()
        # Defaults: no objective / X_pending / sampler.
        kwargs = c(model=mock_model, training_data=self.bd_td)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["sampler"])
        X_pending = torch.rand(2, 2)
        objective = LinearMCObjective(torch.rand(2))
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective=objective,
            X_pending=X_pending,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertIsNone(kwargs["sampler"])
        # TODO: Test passing through of sampler

    def test_construct_inputs_qEI(self):
        c = get_acqf_input_constructor(qExpectedImprovement)
        mock_model = mock.Mock()
        kwargs = c(model=mock_model, training_data=self.bd_td)
        # NOTE(review): computed but never asserted for the single-output
        # case below (it is recomputed for the multi-output case).
        best_f_expected = self.bd_td.Y.squeeze().max()
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["sampler"])
        X_pending = torch.rand(2, 2)
        objective = LinearMCObjective(torch.rand(2))
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td_mo,
            objective=objective,
            X_pending=X_pending,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertIsNone(kwargs["sampler"])
        # best_f is derived from the scalarized multi-output observations.
        best_f_expected = objective(self.bd_td_mo.Y).max()
        self.assertEqual(kwargs["best_f"], best_f_expected)

    def test_construct_inputs_qNEI(self):
        c = get_acqf_input_constructor(qNoisyExpectedImprovement)
        mock_model = mock.Mock()
        # Defaults: prune_baseline=False, X_baseline taken from the data.
        kwargs = c(model=mock_model, training_data=self.bd_td)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["sampler"])
        self.assertFalse(kwargs["prune_baseline"])
        self.assertTrue(torch.equal(kwargs["X_baseline"], self.bd_td.X))
        # Non-block designs are rejected.
        with self.assertRaises(NotImplementedError):
            c(model=mock_model, training_data=self.nbd_td)
        X_baseline = torch.rand(2, 2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            X_baseline=X_baseline,
            prune_baseline=True,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["sampler"])
        self.assertTrue(kwargs["prune_baseline"])
        self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))

    def test_construct_inputs_qPI(self):
        c = get_acqf_input_constructor(qProbabilityOfImprovement)
        mock_model = mock.Mock()
        # Default temperature tau=1e-3.
        kwargs = c(model=mock_model, training_data=self.bd_td)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["sampler"])
        self.assertEqual(kwargs["tau"], 1e-3)
        X_pending = torch.rand(2, 2)
        objective = LinearMCObjective(torch.rand(2))
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective=objective,
            X_pending=X_pending,
            tau=1e-2,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertIsNone(kwargs["sampler"])
        self.assertEqual(kwargs["tau"], 1e-2)

    def test_construct_inputs_qUCB(self):
        c = get_acqf_input_constructor(qUpperConfidenceBound)
        mock_model = mock.Mock()
        # Default beta=0.2.
        kwargs = c(model=mock_model, training_data=self.bd_td)
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsNone(kwargs["objective"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["sampler"])
        self.assertEqual(kwargs["beta"], 0.2)
        X_pending = torch.rand(2, 2)
        objective = LinearMCObjective(torch.rand(2))
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective=objective,
            X_pending=X_pending,
            beta=0.1,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertTrue(torch.equal(kwargs["objective"].weights, objective.weights))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertIsNone(kwargs["sampler"])
        self.assertEqual(kwargs["beta"], 0.1)
class TestMultiObjectiveAcquisitionFunctionInputConstructors(
    InputConstructorBaseTestCase, BotorchTestCase
):
    """Tests for the input constructors of the multi-objective
    (hypervolume-based) acquisition functions."""

    def test_construct_inputs_EHVI(self):
        c = get_acqf_input_constructor(ExpectedHypervolumeImprovement)
        mock_model = mock.Mock()
        objective_thresholds = torch.rand(6)

        # test error on unsupported outcome constraints
        with self.assertRaises(NotImplementedError):
            c(
                model=mock_model,
                training_data=self.bd_td,
                objective_thresholds=objective_thresholds,
                outcome_constraints=mock.Mock(),
            )

        # test with Y_pmean supplied explicitly
        Y_pmean = torch.rand(3, 6)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"], IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        # With 6 outcomes the default alpha for that outcome count is used
        # and a (possibly approximate) NondominatedPartitioning is selected.
        alpha_expected = get_default_partitioning_alpha(6)
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, alpha_expected)
        self.assertTrue(torch.equal(partitioning._neg_ref_point, -objective_thresholds))

        # With 2 outcomes the exact FastNondominatedPartitioning is selected.
        Y_pmean = torch.rand(3, 2)
        objective_thresholds = torch.rand(2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(torch.equal(partitioning.ref_point, objective_thresholds))

        # test with custom objective
        weights = torch.rand(2)
        obj = WeightedMCMultiOutputObjective(weights=weights)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=obj,
            Y_pmean=Y_pmean,
            alpha=0.05,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective)
        # The reference point is transformed by the custom objective's weights.
        ref_point_expected = objective_thresholds * weights
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, 0.05)
        self.assertTrue(torch.equal(partitioning._neg_ref_point, -ref_point_expected))

        # Test without providing Y_pmean (computed from model)
        mean = torch.rand(1, 2)
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        self.assertIsInstance(kwargs["objective"], IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(torch.equal(partitioning.ref_point, objective_thresholds))
        # The partitioning was seeded with the model's posterior mean.
        self.assertTrue(torch.equal(partitioning._neg_Y, -mean))

    def test_construct_inputs_qEHVI(self):
        c = get_acqf_input_constructor(qExpectedHypervolumeImprovement)
        objective_thresholds = torch.rand(2)

        # Test defaults
        mean = torch.rand(1, 2)
        variance = torch.ones(1, 2)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        self.assertIsInstance(kwargs["objective"], IdentityMCMultiOutputObjective)
        ref_point_expected = objective_thresholds
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(torch.equal(partitioning.ref_point, ref_point_expected))
        self.assertTrue(torch.equal(partitioning._neg_Y, -mean))
        # Default sampler: Sobol QMC with 128 samples; no pending points,
        # no constraints, default eta.
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([128]))
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["constraints"])
        self.assertEqual(kwargs["eta"], 1e-3)

        # Test outcome constraints and custom inputs
        mean = torch.tensor([[1.0, 0.25], [0.5, 1.0]])
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        weights = torch.rand(2)
        obj = WeightedMCMultiOutputObjective(weights=weights)
        outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
        X_pending = torch.rand(1, 2)
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=obj,
            outcome_constraints=outcome_constraints,
            X_pending=X_pending,
            alpha=0.05,
            eta=1e-2,
            qmc=False,
            mc_samples=64,
        )
        self.assertIsInstance(kwargs["objective"], WeightedMCMultiOutputObjective)
        ref_point_expected = objective_thresholds * weights
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, 0.05)
        self.assertTrue(torch.equal(partitioning._neg_ref_point, -ref_point_expected))
        # Only the first posterior-mean row enters the partitioning: it
        # satisfies the constraint (0.25 <= 0.5) while the second (1.0) does not.
        Y_expected = mean[:1] * weights
        self.assertTrue(torch.equal(partitioning._neg_Y, -Y_expected))
        # qmc=False selects the IID sampler with mc_samples draws.
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([64]))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        # The outcome constraints become a single callable evaluating
        # A @ Y - b (negative means feasible).
        cons_tfs = kwargs["constraints"]
        self.assertEqual(len(cons_tfs), 1)
        cons_eval = cons_tfs[0](mean)
        cons_eval_expected = torch.tensor([-0.25, 0.5])
        self.assertTrue(torch.equal(cons_eval, cons_eval_expected))
        self.assertEqual(kwargs["eta"], 1e-2)

        # Test custom sampler
        custom_sampler = SobolQMCNormalSampler(num_samples=16, seed=1234)
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            sampler=custom_sampler,
        )
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([16]))
        self.assertEqual(sampler.seed, 1234)

    def test_construct_inputs_qNEHVI(self):
        c = get_acqf_input_constructor(qNoisyExpectedHypervolumeImprovement)
        objective_thresholds = torch.rand(2)
        mock_model = mock.Mock()

        # Test defaults
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        ref_point_expected = objective_thresholds
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        self.assertTrue(torch.equal(kwargs["X_baseline"], self.bd_td.X))
        self.assertIsInstance(kwargs["sampler"], SobolQMCNormalSampler)
        self.assertEqual(kwargs["sampler"].sample_shape, torch.Size([128]))
        self.assertIsInstance(kwargs["objective"], IdentityMCMultiOutputObjective)
        self.assertIsNone(kwargs["constraints"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertEqual(kwargs["eta"], 1e-3)
        self.assertTrue(kwargs["prune_baseline"])
        self.assertEqual(kwargs["alpha"], 0.0)
        self.assertTrue(kwargs["cache_pending"])
        self.assertEqual(kwargs["max_iep"], 0)
        self.assertTrue(kwargs["incremental_nehvi"])

        # Test custom inputs
        weights = torch.rand(2)
        objective = WeightedMCMultiOutputObjective(weights=weights)
        X_baseline = torch.rand(2, 2)
        sampler = IIDNormalSampler(num_samples=4)
        outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
        X_pending = torch.rand(1, 2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=objective,
            X_baseline=X_baseline,
            sampler=sampler,
            outcome_constraints=outcome_constraints,
            X_pending=X_pending,
            eta=1e-2,
            prune_baseline=True,
            alpha=0.1,
            cache_pending=False,
            max_iep=1,
            incremental_nehvi=False,
        )
        # The reference point is mapped through the custom objective.
        ref_point_expected = objective(objective_thresholds)
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
        sampler_ = kwargs["sampler"]
        self.assertIsInstance(sampler_, IIDNormalSampler)
        self.assertEqual(sampler_.sample_shape, torch.Size([4]))
        self.assertEqual(kwargs["objective"], objective)
        # The constraint callables should agree with the canonical transforms
        # on an arbitrary test point.
        cons_tfs_expected = get_outcome_constraint_transforms(outcome_constraints)
        cons_tfs = kwargs["constraints"]
        self.assertEqual(len(cons_tfs), 1)
        test_Y = torch.rand(1, 2)
        self.assertTrue(torch.equal(cons_tfs[0](test_Y), cons_tfs_expected[0](test_Y)))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertEqual(kwargs["eta"], 1e-2)
        self.assertTrue(kwargs["prune_baseline"])
        self.assertEqual(kwargs["alpha"], 0.1)
        self.assertFalse(kwargs["cache_pending"])
        self.assertEqual(kwargs["max_iep"], 1)
        self.assertFalse(kwargs["incremental_nehvi"])
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
1e9a333a9a9085b4606cf1e5bd53b40f54343772 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq860.py | 2953abe4bd2707aa6a6ffda17ae295e957dbe753 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,432 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=42
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the auto-generated benchmark circuit and measure all qubits.

    Args:
        n: NOTE(review): currently unused — the gate list below is fixed
            by the generator; confirm before relying on it.
        input_qubit: sequence of 5 qubits the gates act on.

    Returns:
        A cirq.Circuit ending in a measurement of all qubits (key 'result').
    """
    c = cirq.Circuit()  # circuit begin

    # Initial layer: Hadamard on every qubit.
    c.append(cirq.H.on(input_qubit[0]))  # number=3
    c.append(cirq.H.on(input_qubit[1]))  # number=4
    c.append(cirq.H.on(input_qubit[2]))  # number=5
    c.append(cirq.H.on(input_qubit[3]))  # number=6
    c.append(cirq.H.on(input_qubit[4]))  # number=21

    # Two repetitions of the generated gate sequence.  The `# number=K`
    # tags are the generator's gate ids; the gate ORDER is the behavior —
    # do not reorder.
    for i in range(2):
        c.append(cirq.H.on(input_qubit[0]))  # number=1
        c.append(cirq.H.on(input_qubit[1]))  # number=2
        c.append(cirq.H.on(input_qubit[2]))  # number=7
        c.append(cirq.H.on(input_qubit[3]))  # number=8
        c.append(cirq.H.on(input_qubit[0]))  # number=17
        c.append(cirq.H.on(input_qubit[1]))  # number=18
        c.append(cirq.H.on(input_qubit[2]))  # number=19
        c.append(cirq.H.on(input_qubit[3]))  # number=20
        c.append(cirq.H.on(input_qubit[0]))  # number=36
        c.append(cirq.CZ.on(input_qubit[1],input_qubit[0]))  # number=37
        c.append(cirq.H.on(input_qubit[0]))  # number=38
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=39
        c.append(cirq.X.on(input_qubit[0]))  # number=40
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=41
        c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=30
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1]))  # number=32
        c.append(cirq.X.on(input_qubit[1]))  # number=33
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1]))  # number=34
        c.append(cirq.H.on(input_qubit[2]))  # number=25
        c.append(cirq.CZ.on(input_qubit[0],input_qubit[2]))  # number=26
        c.append(cirq.H.on(input_qubit[2]))  # number=35
        c.append(cirq.H.on(input_qubit[2]))  # number=27
        c.append(cirq.X.on(input_qubit[2]))  # number=23
        c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2]))  # number=24
        c.append(cirq.X.on(input_qubit[3]))  # number=12
        c.append(cirq.X.on(input_qubit[0]))  # number=13
        c.append(cirq.X.on(input_qubit[1]))  # number=14
        c.append(cirq.X.on(input_qubit[2]))  # number=15
        c.append(cirq.X.on(input_qubit[3]))  # number=16
        c.append(cirq.Z.on(input_qubit[1]))  # number=31
    # circuit end

    # Terminal measurement of all qubits under a single key.
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit-like values (0/1, bools, ...) as a string
    of '0'/'1' characters, one per element."""
    chars = [str(int(bit)) for bit in bits]
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 5
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    # Compile the circuit for Google's Sycamore device gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    # Sample the circuit and histogram the measured bitstrings.
    circuit_sample_count = 2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)

    # Dump the sampled distribution and the compiled circuit for offline
    # comparison (relative output path assumed by the benchmark harness).
    writefile = open("../data/startCirq860.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)
    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
7210e8c2d7640f4f2c4816aa750335f3e49bef62 | b76289396b22eda191f25744a600fac2abaf8850 | /hphp/tools/lldb/lookup.py | b2b6eaff45248b34068d08137fb0a176d2b21d29 | [
"MIT",
"PHP-3.01",
"Zend-2.0"
] | permissive | fengjixuchui/hhvm | cb8cece7afd025fb8cdf8479c2a0696f38730949 | bbbb1782fa258b8dd526ffc7e8ba0f6115931bff | refs/heads/master | 2023-03-15T15:55:46.355422 | 2023-01-27T13:59:08 | 2023-01-27T13:59:08 | 175,142,159 | 0 | 1 | NOASSERTION | 2021-11-03T11:22:20 | 2019-03-12T05:34:16 | C++ | UTF-8 | Python | false | false | 4,595 | py | import lldb
import shlex
try:
# LLDB needs to load this outside of the usual Buck mechanism
import idx
import utils
except ModuleNotFoundError:
import hhvm_lldb.idx as idx
import hhvm_lldb.utils as utils
def lookup_func(func_id: lldb.SBValue) -> lldb.SBValue:
    """ Find the function corresponding to a given FuncID

    Args:
        func_id: A HPHP::FuncId wrapped in an lldb.SBValue

    Returns:
        func: A HPHP::Func* wrapped in an lldb.SBValue

    Raises:
        AssertionError: if func_id has the wrong type or the lookup
            produces an invalid value.
    """
    target = func_id.target
    assert func_id.type == utils.Type("HPHP::FuncId", target), f"invalid func_id, type given is {func_id.type.name}"

    # The presence of Func::s_funcVec in the target selects which lookup
    # strategy this build uses.
    func_vec = utils.Global("HPHP::Func::s_funcVec", target)
    if func_vec.IsValid():
        # Not LowPtr: m_id indexes into the global AtomicLowPtrVector.
        func_id_val = utils.get(func_id, "m_id").unsigned
        result = idx.atomic_low_ptr_vector_at(func_vec, func_id_val)
        assert result.IsValid(), "returned invalid HPHP::Func"
    else:
        # TODO test this code path
        # LowPtr: m_id itself holds the (low) pointer to the Func.
        result = utils.rawptr(utils.get(func_id, 'm_id'))

    # Cast the raw value to HPHP::Func* before returning it.
    func_ptr = result.Cast(utils.Type('HPHP::Func', target).GetPointerType())
    assert func_ptr.IsValid(), "couldn't return HPHP::Func *"
    return func_ptr
def lookup_func_from_frame_pointer(fp: lldb.SBValue) -> lldb.SBValue:
    """Resolve the jitted function recorded in an activation record.

    Args:
        fp: Frame pointer, i.e. an HPHP::ActRec wrapped in an SBValue

    Returns:
        An SBValue wrapping the HPHP::Func* identified by the frame's
        m_funcId field.
    """
    return lookup_func(utils.get(fp, 'm_funcId'))
class LookupCommand(utils.Command):
    """LLDB `lookup` command: resolve HHVM runtime objects by their IDs."""

    command = "lookup"
    description = "Look up HHVM runtime objects by ID"

    class ArgsNamespace:  # noqa: B903
        # argparse will add attributes to this class
        def __init__(self, exe_ctx: lldb.SBExecutionContext, result: lldb.SBCommandReturnObject):
            self.exe_ctx = exe_ctx
            self.result = result

    @classmethod
    def create_parser(cls):
        """Build the argparse parser with one subcommand per lookup kind."""
        parser = cls.default_parser()
        subparsers = parser.add_subparsers(title="List of lookup subcommands")

        # `lookup func <funcid>`
        func_cmd = subparsers.add_parser(
            "func",
            help="Look up a Func* by its FuncId",
        )
        func_cmd.add_argument(
            "funcid",
            help="A HPHP::FuncId (i.e. int) uniquely identifying a HPHP::Func*"
        )
        func_cmd.set_defaults(func=cls._lookup_func_prep)

        # `lookup litstr <id> [unit]` (not implemented yet, see below)
        litstr_cmd = subparsers.add_parser(
            "litstr",
            help="Look up a litstr StringData* by its Id and Unit*",
            epilog="If no Unit is given, the current unit (set by `unit`) is used.",
        )
        litstr_cmd.add_argument(
            "id",
            help="The ID of the desired StringData (i.e. an HPHP::Id)",
        )
        litstr_cmd.add_argument(
            "unit",
            nargs="?",
            help="The unit to use",
        )
        litstr_cmd.set_defaults(func=cls._lookup_litstr_prep)
        return parser

    def __init__(self, debugger, internal_dict):
        super().__init__(debugger, internal_dict)

    def __call__(self, debugger, command, exe_ctx, result):
        """Entry point invoked by LLDB with the raw command string."""
        namespace = self.ArgsNamespace(exe_ctx, result)
        command_args = shlex.split(command)
        try:
            options = self.parser.parse_args(command_args, namespace=namespace)
            # Dispatch to the subcommand handler set via set_defaults above.
            options.func(options)
        except SystemExit:
            # argparse calls sys.exit() on a parse failure; report the error
            # to LLDB instead of letting it propagate.
            result.SetError("option parsing failed")
            return

    @classmethod
    def _lookup_func_prep(cls, options):
        # Evaluate the user-supplied expression in the current frame and
        # coerce the value to HPHP::FuncId before resolving it.
        func_id_type = utils.Type("HPHP::FuncId", options.exe_ctx.target)
        func_id = options.exe_ctx.frame.EvaluateExpression(options.funcid).Cast(func_id_type)
        res = lookup_func(func_id)
        if res is None:
            options.result.SetError(f"cannot get function identified with FuncId {func_id}")
            return
        options.result.write(str(res))

    @classmethod
    def _lookup_litstr_prep(cls, options):
        # Placeholder: the `litstr` subcommand is registered but unimplemented.
        raise NotImplementedError
def __lldb_init_module(debugger, _internal_dict, top_module=""):
    """ Register the commands in this file with the LLDB debugger.

    Defining this in this module (in addition to the main hhvm module) allows
    this script to be imported into LLDB separately; LLDB looks for a function with
    this name at module load time.

    Arguments:
        debugger: Current debugger object
        _internal_dict: Dict for current script session. For internal use by LLDB only.
        top_module: Prefix of the containing module, if any; forwarded to the
            command registration (empty when this file is loaded standalone).

    Returns:
        None
    """
    LookupCommand.register_lldb_command(debugger, __name__, top_module)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
e27d3ae4da61d77e608163c32861f19c4bc6f1e5 | 56bd9b3518f21080a0493f5330249bf5e85289fd | /patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/joyent.py | dcf15dd66846793bd7529d7eb9cc9e240734edac | [
"Apache-2.0"
] | permissive | kevin-zhangsen/badam | da680bf8669722b5bc922381537bc4762fa5c228 | 6823f7dcd7c1b54c3b38edeffe59c16317598a2c | refs/heads/master | 2020-04-01T13:43:03.300155 | 2015-10-29T01:07:46 | 2015-10-29T01:07:46 | 45,371,347 | 2 | 0 | null | 2015-11-02T04:02:50 | 2015-11-02T04:02:47 | null | UTF-8 | Python | false | false | 8,019 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Joyent Cloud (http://www.joyentcloud.com) driver.
"""
import base64
# Prefer simplejson (historically faster) when installed; fall back to the
# standard-library json module otherwise.  Catch only ImportError — the
# original bare `except:` also swallowed SystemExit/KeyboardInterrupt.
try:
    import simplejson as json
except ImportError:
    import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.types import LibcloudError
from libcloud.compute.providers import Provider
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.types import NodeState, InvalidCredsError
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize
from libcloud.utils.networking import is_private_subnet
# Each region's API endpoint is <region> + API_HOST_SUFFIX.
API_HOST_SUFFIX = '.api.joyentcloud.com'
API_VERSION = '~6.5'

# Map Joyent machine states to libcloud NodeState values.
# NOTE(review): 'stopping' and 'stopped' both map to TERMINATED — confirm
# this matches the intended libcloud semantics.
NODE_STATE_MAP = {
    'provisioning': NodeState.PENDING,
    'running': NodeState.RUNNING,
    'stopping': NodeState.TERMINATED,
    'stopped': NodeState.TERMINATED,
    'deleted': NodeState.TERMINATED
}

# Data centers accepted by the driver constructor.
VALID_REGIONS = [
    'us-east-1', 'us-east-2', 'us-east-3',
    'us-west-1',
    'us-sw-1',
    'eu-ams-1'
]
DEFAULT_REGION = 'us-east-1'
class JoyentResponse(JsonResponse):
    """HTTP response wrapper for the Joyent API."""

    # Status codes that count as a successful API call.
    valid_response_codes = [
        httplib.OK,
        httplib.ACCEPTED,
        httplib.CREATED,
        httplib.NO_CONTENT,
    ]

    def parse_error(self):
        # Anything other than 401 is returned verbatim; bad credentials
        # raise a dedicated exception built from the JSON error payload.
        if self.status != httplib.UNAUTHORIZED:
            return self.body
        data = self.parse_body()
        raise InvalidCredsError(data['code'] + ': ' + data['message'])

    def success(self):
        return self.status in self.valid_response_codes
class JoyentConnection(ConnectionUserAndKey):
    """Authenticated HTTP connection to the Joyent API."""

    responseCls = JoyentResponse
    allow_insecure = False

    def add_default_headers(self, headers):
        # Every request speaks JSON and pins the API version.
        headers.update({
            'Accept': 'application/json',
            'Content-Type': 'application/json; charset=UTF-8',
            'X-Api-Version': API_VERSION,
        })
        # HTTP Basic auth from the stored user id / key pair.
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')
        headers['Authorization'] = 'Basic %s' % (encoded)
        return headers
class JoyentNodeDriver(NodeDriver):
    """
    Joyent node driver class.
    """
    type = Provider.JOYENT
    name = 'Joyent'
    website = 'http://www.joyentcloud.com'
    connectionCls = JoyentConnection
    # Joyent generates a root password on node creation (see _to_node).
    features = {'create_node': ['generates_password']}

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """Validate the region and point the connection at its endpoint.

        :raises LibcloudError: if ``region`` is not in VALID_REGIONS.
        """
        # Location is here for backward compatibility reasons
        if 'location' in kwargs:
            region = kwargs['location']

        if region not in VALID_REGIONS:
            msg = 'Invalid region: "%s". Valid region: %s'
            raise LibcloudError(msg % (region,
                                ', '.join(VALID_REGIONS)), driver=self)

        super(JoyentNodeDriver, self).__init__(key=key, secret=secret,
                                               secure=secure, host=host,
                                               port=port, region=region,
                                               **kwargs)
        # Derive the API host from the chosen region.
        self.connection.host = region + API_HOST_SUFFIX

    def list_images(self):
        """List available images ("datasets" in Joyent terms).

        :rtype: ``list`` of :class:`NodeImage`
        """
        result = self.connection.request('/my/datasets').object

        images = []
        for value in result:
            extra = {'type': value['type'], 'urn': value['urn'],
                     'os': value['os'], 'default': value['default']}
            image = NodeImage(id=value['id'], name=value['name'],
                              driver=self.connection.driver, extra=extra)
            images.append(image)

        return images

    def list_sizes(self):
        """List available sizes ("packages" in Joyent terms).

        :rtype: ``list`` of :class:`NodeSize`
        """
        result = self.connection.request('/my/packages').object

        sizes = []
        for value in result:
            # The API does not report bandwidth or price for packages.
            size = NodeSize(id=value['name'], name=value['name'],
                            ram=value['memory'], disk=value['disk'],
                            bandwidth=None, price=0.0,
                            driver=self.connection.driver)
            sizes.append(size)

        return sizes

    def list_nodes(self):
        """List the machines in the current account.

        :rtype: ``list`` of :class:`Node`
        """
        result = self.connection.request('/my/machines').object

        nodes = []
        for value in result:
            node = self._to_node(value)
            nodes.append(node)

        return nodes

    def reboot_node(self, node):
        """Reboot a machine; returns ``True`` if the API accepted the request."""
        data = json.dumps({'action': 'reboot'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def destroy_node(self, node):
        """Delete a machine; returns ``True`` on HTTP 204 (No Content)."""
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         method='DELETE')
        return result.status == httplib.NO_CONTENT

    def create_node(self, **kwargs):
        """Create a machine from ``name``, ``size`` and ``image`` kwargs.

        :rtype: :class:`Node`
        """
        name = kwargs['name']
        size = kwargs['size']
        image = kwargs['image']

        data = json.dumps({'name': name, 'package': size.id,
                           'dataset': image.id})
        result = self.connection.request('/my/machines', data=data,
                                         method='POST')
        return self._to_node(result.object)

    def ex_stop_node(self, node):
        """
        Stop node

        :param  node: The node to be stopped
        :type   node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'stop'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def ex_start_node(self, node):
        """
        Start node

        :param  node: The node to be stopped
        :type   node: :class:`Node`

        :rtype: ``bool``
        """
        data = json.dumps({'action': 'start'})
        result = self.connection.request('/my/machines/%s' % (node.id),
                                         data=data, method='POST')
        return result.status == httplib.ACCEPTED

    def ex_get_node(self, node_id):
        """
        Return a Node object based on a node ID.

        :param  node_id: ID of the node
        :type   node_id: ``str``

        :return:  A Node object for the node
        :rtype:   :class:`Node`
        """
        result = self.connection.request('/my/machines/%s' % (node_id))
        return self._to_node(result.object)

    def _to_node(self, data):
        """Convert a raw machine dict from the API into a :class:`Node`."""
        state = NODE_STATE_MAP[data['state']]
        public_ips = []
        private_ips = []
        extra = {}

        # Split the machine's addresses into public and private lists.
        for ip in data['ips']:
            if is_private_subnet(ip):
                private_ips.append(ip)
            else:
                public_ips.append(ip)

        # Freshly created machines carry the generated root password.
        if 'credentials' in data['metadata']:
            extra['password'] = data['metadata']['credentials']['root']

        node = Node(id=data['id'], name=data['name'], state=state,
                    public_ips=public_ips, private_ips=private_ips,
                    driver=self.connection.driver, extra=extra)
        return node
| [
"nash.xiejun@gmail.com"
] | nash.xiejun@gmail.com |
c907c4f4f301e61388810fc7e3c7e42f5ac94dc1 | 901944f407f4a06a4c4027d6139ce21165976857 | /BVAE/BVAE/BVAE_v3/BVAE.py | d10f489da3025ce358d75445e5c372e27b0ff788 | [] | no_license | chriscremer/Other_Code | a406da1d567d63bf6ef9fd5fbf0a8f177bc60b05 | 7b394fa87523803b3f4536b316df76cc44f8846e | refs/heads/master | 2021-01-17T02:34:56.215047 | 2020-05-26T13:59:05 | 2020-05-26T13:59:05 | 34,680,279 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 8,404 | py |
# Bayesian Variational Autoencoder
import numpy as np
import tensorflow as tf
import pickle
from os.path import expanduser
home = expanduser("~")
from utils import log_normal as log_norm
from utils import log_bernoulli as log_bern
from NN import NN
from BNN import BNN
class BVAE(object):
def __init__(self, hyperparams):
# tf.reset_default_graph()
#Model hyperparameters
self.learning_rate = hyperparams['learning_rate']
self.encoder_act_func = tf.nn.elu #tf.nn.softplus #tf.tanh
self.decoder_act_func = tf.tanh
self.encoder_net = hyperparams['encoder_net']
self.decoder_net = hyperparams['decoder_net']
self.z_size = hyperparams['z_size']
self.x_size = hyperparams['x_size']
self.rs = 0
self.n_W_particles = hyperparams['n_W_particles']
#Placeholders - Inputs/Targets
self.x = tf.placeholder(tf.float32, [None, self.x_size])
self.batch_size = tf.placeholder(tf.int32, None)
self.n_z_particles = tf.placeholder(tf.int32, None)
self.batch_frac = tf.placeholder(tf.float32, None)
# Model
encoder = NN(self.encoder_net, self.encoder_act_func, self.batch_size)
decoder = BNN(self.decoder_net, self.decoder_act_func, self.batch_size)
# q(W) # p(W)
W, log_pW, log_qW = decoder.sample_weights()
# q(z|x,W)
# p(z)
# p(x|z,W)
#Objective
self.elbo = self.objective(self.x)
# Minimize negative ELBO
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
epsilon=1e-02).minimize(-self.elbo)
#Finalize Initilization
self.init_vars = tf.global_variables_initializer()
self.saver = tf.train.Saver()
tf.get_default_graph().finalize()
self.sess = tf.Session()
def objective(self, x):
'''
Returns scalar to maximize
'''
encoder = NN(self.encoder_net, self.encoder_act_func, self.batch_size)
decoder = BNN(self.decoder_net, self.decoder_act_func, self.batch_size)
log_pW_list = []
log_qW_list = []
log_pz_list = []
log_qz_list = []
log_px_list = []
for W_i in range(self.n_W_particles):
# Sample decoder weights __, [1], [1]
W, log_pW, log_qW = decoder.sample_weights()
# Sample z [B,Z], [B], [B]
z, log_pz, log_qz = self.sample_z(x, encoder, decoder, W)
# Decode [B,X]
y = decoder.feedforward(W, z)
# Likelihood p(x|z) [B]
log_px = log_bern(x,y)
#Store for later
log_pW_list.append(tf.reduce_mean(log_pz))
log_qW_list.append(tf.reduce_mean(log_pz))
log_pz_list.append(tf.reduce_mean(log_pz))
log_qz_list.append(tf.reduce_mean(log_pz))
log_px_list.append(tf.reduce_mean(log_px))
# Calculte log probs
self.log_px = tf.reduce_mean(tf.stack(log_px)) #over batch + W_particles + z_particles
self.log_pz = tf.reduce_mean(tf.stack(log_pz)) #over batch + z_particles
self.log_qz = tf.reduce_mean(tf.stack(log_qz)) #over batch + z_particles
self.log_pW = tf.reduce_mean(tf.stack(log_pW)) #W_particles
self.log_qW = tf.reduce_mean(tf.stack(log_qW)) #W_particles
self.z_elbo = self.log_px + self.log_pz - self.log_qz
#Calc elbo
elbo = self.log_px + self.log_pz - self.log_qz + self.batch_frac*(self.log_pW - self.log_qW)
return elbo
def sample_z(self, x, encoder, decoder, W):
'''
z: [B,Z]
log_pz: [B]
log_qz: [B]
'''
#Encode
z_mean_logvar = encoder.feedforward(x) #[B,Z*2]
z_mean = tf.slice(z_mean_logvar, [0,0], [self.batch_size, self.z_size]) #[B,Z]
z_logvar = tf.slice(z_mean_logvar, [0,self.z_size], [self.batch_size, self.z_size]) #[B,Z]
#Sample z [B,Z]
eps = tf.random_normal((self.batch_size, self.z_size), 0, 1, seed=self.rs)
z = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_logvar)), eps)) #[B,Z]
# Calc log probs [B]
log_pz = log_norm(z, tf.zeros([self.batch_size, self.z_size]),
tf.log(tf.ones([self.batch_size, self.z_size])))
log_qz = log_norm(z, z_mean, z_logvar)
return z, log_pz, log_qz
def train(self, train_x, valid_x=[], display_step=5,
path_to_load_variables='', path_to_save_variables='',
epochs=10, batch_size=20, n_W_particles=2, n_z_particles=3):
'''
Train.
'''
random_seed=1
rs=np.random.RandomState(random_seed)
n_datapoints = len(train_x)
arr = np.arange(n_datapoints)
if path_to_load_variables == '':
self.sess.run(self.init_vars)
else:
#Load variables
self.saver.restore(self.sess, path_to_load_variables)
print 'loaded variables ' + path_to_load_variables
#start = time.time()
for epoch in range(epochs):
#shuffle the data
rs.shuffle(arr)
train_x = train_x[arr]
data_index = 0
for step in range(n_datapoints/batch_size):
#Make batch
batch = []
while len(batch) != batch_size:
batch.append(train_x[data_index])
data_index +=1
# Fit training using batch data
_ = self.sess.run((self.optimizer), feed_dict={self.x: batch,
self.batch_size: batch_size,
self.n_z_particles: n_z_particles,
self.batch_frac: 1./float(n_datapoints)})
# Display logs per epoch step
if step % display_step == 0:
elbo,z_elbo,log_px,log_pz,log_qz,log_pW,log_qW = self.sess.run((self.elbo, self.z_elbo,
self.log_px, self.log_pz,
self.log_qz, self.log_pW,
self.log_qW),
feed_dict={self.x: batch,
self.batch_size: batch_size,
self.n_z_particles: n_z_particles,
self.batch_frac: 1./float(n_datapoints)})
print ("Epoch", str(epoch+1)+'/'+str(epochs),
'Step:%04d' % (step+1) +'/'+ str(n_datapoints/batch_size),
"elbo={:.4f}".format(float(elbo)),
z_elbo,log_px,log_pz,log_qz,log_pW,log_qW)
if path_to_save_variables != '':
self.saver.save(self.sess, path_to_save_variables)
print 'Saved variables to ' + path_to_save_variables
if __name__ == '__main__':

    # Hyperparameters for a small MNIST model (784 pixels, 10 latents).
    x_size = 784
    z_size = 10
    hyperparams = {
        'learning_rate': .0001,
        'x_size': x_size,
        'z_size': z_size,
        'encoder_net': [x_size, 20, z_size*2],
        'decoder_net': [z_size, 20, x_size],
        'n_W_particles': 2}

    # BUG FIX: the class defined in this file is BVAE; the original
    # instantiated the undefined name `HBAE`, which raised a NameError.
    model = BVAE(hyperparams)

    print('Loading data')
    with open(home+'/Documents/MNIST_data/mnist.pkl','rb') as f:
        mnist_data = pickle.load(f)

    train_x = mnist_data[0][0]
    train_y = mnist_data[0][1]
    valid_x = mnist_data[1][0]
    valid_y = mnist_data[1][1]
    test_x = mnist_data[2][0]
    test_y = mnist_data[2][1]

    # path_to_load_variables=home+'/Documents/tmp/vars.ckpt'
    path_to_load_variables=''
    path_to_save_variables=home+'/Documents/tmp/vars2.ckpt'

    print('Training')
    model.train(train_x=train_x,
                epochs=50, batch_size=20, n_W_particles=1, n_z_particles=1, display_step=1000,
                path_to_load_variables=path_to_load_variables,
                path_to_save_variables=path_to_save_variables)

    print('Done.')
| [
"chris.a.cremer@gmail.com"
] | chris.a.cremer@gmail.com |
61d5e23c19070da8ec467f74852e7096b33ab380 | fcde32709c62b8ee86da459bb7c8eee52c848118 | /爬虫1905/day09/spider_day09_course/day09/Maoyan/Maoyan/settings.py | c4dbf33b7ab54d502aa651701fc4157d30dcb0bc | [] | no_license | klaus2015/py_base | 6b92d362c3d7dc0e09205a037f4d580381dac94d | ec32c731c1c2f6a0dab87f1d167397e4fa86b8de | refs/heads/master | 2022-07-28T15:49:30.383648 | 2020-05-11T15:31:43 | 2020-05-11T15:31:43 | 261,777,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,382 | py | # -*- coding: utf-8 -*-
# Scrapy settings for Maoyan project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Maoyan'
SPIDER_MODULES = ['Maoyan.spiders']
NEWSPIDER_MODULE = 'Maoyan.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Maoyan (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'INFO'
# 设置导出编码
FEED_EXPORT_ENCODING = 'utf-8'
# LOG_FILE = 'maoyan.log'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'User-Agent': 'Mozilla/5.0',
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'Maoyan.middlewares.MaoyanSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'Maoyan.middlewares.MaoyanDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'Maoyan.pipelines.MaoyanPipeline': 300,
'Maoyan.pipelines.MaoyanMysqlPipeline' : 200,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# MySQL connection settings -- presumably consumed by
# Maoyan.pipelines.MaoyanMysqlPipeline (see ITEM_PIPELINES above);
# verify against pipelines.py.
MYSQL_HOST = 'localhost'
MYSQL_USER = 'root'
MYSQL_PWD = '123456'
MYSQL_DB = 'maoyandb'
CHARSET = 'utf8'
| [
"598467866@qq.com"
] | 598467866@qq.com |
cf4349e66001ff7285d399063205aebeb68ba443 | fe06311a7de13a02ca0be37d84c542c3cece3f33 | /Chapter38/file_38_2a.py | 8fd1b4c07a9deadcb2e9460ccbcdfe4b3dbce9c8 | [] | no_license | mooksys/Python_Algorithms | a4a84ddabc34ec4b7cc0ac01d55019880af38514 | 375817e3dfdec94411cf245fe3f685a69d92b948 | refs/heads/master | 2020-08-24T06:35:05.791979 | 2018-07-30T01:22:24 | 2018-07-30T01:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | def display_line():
print("-----------------------------")
# 메인 코드
print("안녕하세요!")
display_line()
print("어떻게 지내세요?")
display_line()
print("당신의 이름은 무엇입니까?")
display_line()
| [
"jeipubmanager@gmail.com"
] | jeipubmanager@gmail.com |
fc0ff4e09563e741c446e2a0640f0afeae7c17bc | af8ccb3655f28068156fdb9cc8aafe7ea83d4f1f | /src/wheel/vendored/packaging/_manylinux.py | 2f0cc7439a00da8e418a314d587f215d757320a1 | [
"MIT"
] | permissive | pypa/wheel | abfc248c9dc8e9c909f696b2967bc2745ca38bf8 | e76040b0d970a0fe6211c31888fc929078ba02d9 | refs/heads/main | 2023-08-22T20:54:56.004832 | 2023-08-22T09:29:26 | 2023-08-22T09:29:26 | 98,346,885 | 464 | 169 | MIT | 2023-09-12T07:44:29 | 2017-07-25T20:24:25 | Python | UTF-8 | Python | false | false | 8,813 | py | import collections
import contextlib
import functools
import os
import re
import sys
import warnings
from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple
from ._elffile import EIClass, EIData, ELFFile, EMachine
# e_flags constants from the "ELF for the ARM Architecture" ABI document,
# used by _is_linux_armhf to detect a version-5 EABI hard-float binary.
EF_ARM_ABIMASK = 0xFF000000  # mask selecting the EABI version field
EF_ARM_ABI_VER5 = 0x05000000  # EABI version 5
EF_ARM_ABI_FLOAT_HARD = 0x00000400  # hard-float calling convention flag
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
    """Context manager yielding an ``ELFFile`` for *path*, or ``None`` when
    the file cannot be opened or parsed as ELF (best-effort detection)."""
    try:
        with open(path, "rb") as f:
            yield ELFFile(f)
    except (OSError, TypeError, ValueError):
        # Unreadable file or not a valid ELF -> signal "unknown" via None.
        # NOTE(review): an OSError/TypeError/ValueError raised by the
        # caller's with-body is delivered at the yield and would also land
        # here -- presumably intentional best-effort behavior; confirm.
        yield None
def _is_linux_armhf(executable: str) -> bool:
    """Return True if *executable* is a 32-bit LSB ARM hard-float ELF.

    The hard-float ABI is detected from the ELF header of the running
    process (https://static.docs.arm.com/ihi0044/g/aaelf32.pdf).
    """
    with _parse_elf(executable) as elf:
        if elf is None:
            return False
        if elf.capacity != EIClass.C32 or elf.encoding != EIData.Lsb:
            return False
        if elf.machine != EMachine.Arm:
            return False
        if elf.flags & EF_ARM_ABIMASK != EF_ARM_ABI_VER5:
            return False
        return elf.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
def _is_linux_i686(executable: str) -> bool:
    """Return True if *executable* is a 32-bit LSB x86 (i386) ELF."""
    with _parse_elf(executable) as elf:
        if elf is None:
            return False
        is_32bit_lsb = elf.capacity == EIClass.C32 and elf.encoding == EIData.Lsb
        return is_32bit_lsb and elf.machine == EMachine.I386
def _have_compatible_abi(executable: str, arch: str) -> bool:
    """Return True when *arch* matches the ABI of *executable*.

    armv7l and i686 need an ELF-header inspection; the remaining 64-bit
    architectures are accepted by name alone.
    """
    elf_checks = {"armv7l": _is_linux_armhf, "i686": _is_linux_i686}
    check = elf_checks.get(arch)
    if check is not None:
        return check(executable)
    return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)


class _GLibCVersion(NamedTuple):
    """A glibc version as a (major, minor) pair; NamedTuple ordering makes
    instances directly comparable with ``<`` / ``==`` (see _is_compatible)."""

    major: int
    minor: int
def _glibc_version_string_confstr() -> Optional[str]:
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try:
# Should be a string like "glibc 2.17".
version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
assert version_string is not None
_, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes() -> Optional[str]:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
# can proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
def _glibc_version_string() -> Optional[str]:
    """Returns glibc version string, or None if not using glibc."""
    version = _glibc_version_string_confstr()
    if not version:
        version = _glibc_version_string_ctypes()
    return version
def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
"""Parse glibc version.
We use a regexp instead of str.split because we want to discard any
random junk that might come after the minor version -- this might happen
in patched/forked versions of glibc (e.g. Linaro's version of glibc
uses version strings like "2.20-2014.11"). See gh-3588.
"""
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
f"Expected glibc version with 2 components major.minor,"
f" got: {version_str}",
RuntimeWarning,
)
return -1, -1
return int(m.group("major")), int(m.group("minor"))
@functools.lru_cache()
def _get_glibc_version() -> Tuple[int, int]:
    """Return the running glibc version as (major, minor), (-1, -1) if absent.

    Cached: the libc the process is linked against cannot change at runtime.
    """
    raw = _glibc_version_string()
    if raw is None:
        return (-1, -1)
    return _parse_glibc_version(raw)
# From PEP 513, PEP 600
def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
    """Decide whether a manylinux tag pinned to *version* applies here.

    ``name`` is accepted for symmetry but unused by the logic; the verdict
    rests on the runtime glibc version and, when present, the user-provided
    ``_manylinux`` override module (PEP 513 / PEP 600).
    """
    if _get_glibc_version() < version:
        return False
    # Defer to the optional _manylinux override module when it exists.
    try:
        import _manylinux  # noqa
    except ImportError:
        return True
    if hasattr(_manylinux, "manylinux_compatible"):
        verdict = _manylinux.manylinux_compatible(version[0], version[1], arch)
        # None means "no opinion": fall back to the glibc check above.
        return True if verdict is None else bool(verdict)
    # Legacy per-tag boolean attributes (pre-PEP 600 protocol).
    legacy_attrs = {
        _GLibCVersion(2, 5): "manylinux1_compatible",
        _GLibCVersion(2, 12): "manylinux2010_compatible",
        _GLibCVersion(2, 17): "manylinux2014_compatible",
    }
    attr = legacy_attrs.get(version)
    if attr is not None and hasattr(_manylinux, attr):
        return bool(getattr(_manylinux, attr))
    return True
# Legacy tag names keyed by the glibc (major, minor) each one pins; used by
# platform_tags to emit the pre-PEP 600 aliases alongside manylinux_X_Y.
_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}
def platform_tags(linux: str, arch: str) -> Iterator[str]:
    """Yield manylinux platform tags usable on this interpreter, newest first.

    *linux* is the base platform string (e.g. "linux_x86_64"); every yielded
    tag is *linux* with "linux" replaced by "manylinux_<major>_<minor>" (plus
    the legacy manylinux1/2010/2014 aliases where they apply). Yields nothing
    when *arch* does not match the running interpreter's ABI.
    """
    if not _have_compatible_abi(sys.executable, arch):
        return
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    # (2, 16) here because the minor-version range below is exclusive.
    too_old_glibc2 = _GLibCVersion(2, 16)
    if arch in {"x86_64", "i686"}:
        # On x86/i686 also oldest glibc to be supported is (2, 5).
        too_old_glibc2 = _GLibCVersion(2, 4)
    current_glibc = _GLibCVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
    for glibc_max in glibc_max_list:
        if glibc_max.major == too_old_glibc2.major:
            min_minor = too_old_glibc2.minor
        else:
            # For other glibc major versions oldest supported is (x, 0).
            min_minor = -1
        for glibc_minor in range(glibc_max.minor, min_minor, -1):
            glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
            tag = "manylinux_{}_{}".format(*glibc_version)
            if _is_compatible(tag, arch, glibc_version):
                yield linux.replace("linux", tag)
            # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
            if glibc_version in _LEGACY_MANYLINUX_MAP:
                legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                if _is_compatible(legacy_tag, arch, glibc_version):
                    yield linux.replace("linux", legacy_tag)
| [
"alex.gronholm@nextday.fi"
] | alex.gronholm@nextday.fi |
8f14c453c8eb93b243a01aaf1d3cbb6e7c511f2a | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/S_TruncatedLFM.py | 5972eb45ef92ce35b93b076421fd884df883530c | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_TruncatedLFM [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_TruncatedLFM&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-lfmtrunc).
# ## Prepare the environment
# +
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import array, zeros, diag, eye, r_
from numpy.linalg import pinv
import matplotlib.pyplot as plt
plt.style.use('seaborn')
# input parameters
n_ = 2 # target dimension
k_ = 1 # number of factors
sig2_XZ = array([[4, 9.5, -1.8], [9.5, 25, -4.5], [-1.8, -4.5, 1]]) # joint covariance of target variables and factors
# -
# ## Compute optimal loadings
# +
# Regression loadings of X on Z: b = sig_XZ @ pinv(sig2_Z)
# (pseudo-inverse guards against a singular factor covariance).
sig_XZ = sig2_XZ[:n_, n_:n_ + k_]
sig2_Z = sig2_XZ[n_:n_ + k_, n_:n_ + k_]
b = sig_XZ.dot(pinv(sig2_Z))
# -
# ## Compute truncated joint covariance of residuals and factors
# m = [[I, -b], [0, I]] maps (X, Z) -> (U, Z) with U = X - b@Z.
m = r_[r_['-1', eye(n_), -b], r_['-1', zeros((k_, n_)), eye(k_)]]
sig2_UZ = m@sig2_XZ@m.T
# Truncation: keep only the diagonal of the residual block and zero the
# residual-factor cross-covariances; the factor block is kept as-is.
sig2_UZtrunc = r_[r_['-1', np.diagflat(diag(sig2_UZ[:n_, :n_])), zeros((n_, k_))], r_[
    '-1', zeros((k_, n_)), sig2_UZ[n_:n_ + k_, n_:n_ + k_]]]
# ## Compute truncated covariance of target variables
# Reconstruct X = U + b@Z via m_tilde = [I  b].
m_tilde = r_['-1', eye(n_), b]
sig2_Xtrunc = m_tilde@sig2_UZtrunc@m_tilde.T
| [
"dario.popadic@yahoo.com"
] | dario.popadic@yahoo.com |
60c0e73dc9ba74860ee5ee068272a5e264532ab6 | f5e567f11102f0d0f52e07d7bed1b1c41576cdd1 | /0x08-python-more_classes/3-rectangle.py | 85e83f5928cc034dd822d747b1fb095d9b123be1 | [] | no_license | AndresEscobarDev/holbertonschool-higher_level_programming | f0a39f5ae394612d9cec38e541e154a75ac43afb | 9e55e0427e631a670c0c7781e2e3819f6b50d825 | refs/heads/master | 2022-12-16T13:39:05.993853 | 2020-09-25T04:49:25 | 2020-09-25T04:49:25 | 259,411,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,590 | py | #!/usr/bin/python3
"""Rectangle Module"""
class Rectangle():
    """Rectangle with validated, non-negative width and height."""

    def __init__(self, width=0, height=0):
        """Initialize the rectangle; both dimensions default to 0."""
        self.width = width
        self.height = height

    @property
    def width(self):
        """Horizontal size of the rectangle."""
        return self.__width

    @width.setter
    def width(self, value):
        """Validate and store the width."""
        if type(value) is not int and type(value) is not float:
            raise TypeError("width must be an integer")
        if value < 0:
            raise ValueError("width must be >= 0")
        self.__width = value

    @property
    def height(self):
        """Vertical size of the rectangle."""
        return self.__height

    @height.setter
    def height(self, value):
        """Validate and store the height."""
        if type(value) is not int and type(value) is not float:
            raise TypeError("height must be an integer")
        if value < 0:
            raise ValueError("height must be >= 0")
        self.__height = value

    def area(self):
        """Return width * height."""
        return self.__width * self.__height

    def perimeter(self):
        """Return the perimeter, or 0 when either dimension is 0."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return (self.__width + self.__height) * 2

    def __str__(self):
        """Render as rows of '#'; empty string for a degenerate rectangle."""
        if self.__width == 0 or self.__height == 0:
            return ''
        row = '#' * self.__width
        return '\n'.join(row for _ in range(self.__height))
| [
"felipeescobar15@gmail.com"
] | felipeescobar15@gmail.com |
cd8f8dd9bf4d6c67daa0fb89300d6ddc0c1b004d | 01733042e84a768b77f64ec24118d0242b2f13b8 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/statistics/view/view.py | f6edfd301e9a0b8c6ac746ca63a16dfed45e836f | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 40,901 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class View(Base):
"""The root node for all statistics view per 5.40 SV API.
The View class encapsulates a list of view resources that are managed by the user.
A list of resources can be retrieved from the server using the View.find() method.
The list can be managed by using the View.add() and View.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'view'
_SDM_ATT_MAP = {
'AutoRefresh': 'autoRefresh',
'AutoUpdate': 'autoUpdate',
'AvailableStatsSelectorColumns': 'availableStatsSelectorColumns',
'Caption': 'caption',
'CsvFileName': 'csvFileName',
'EnableCsvLogging': 'enableCsvLogging',
'Enabled': 'enabled',
'EnabledStatsSelectorColumns': 'enabledStatsSelectorColumns',
'OnDemandRefreshView': 'onDemandRefreshView',
'PageTimeout': 'pageTimeout',
'ReadOnly': 'readOnly',
'StatsSelectorManager': 'statsSelectorManager',
'TimeSeries': 'timeSeries',
'TreeViewNodeName': 'treeViewNodeName',
'Type': 'type',
'TypeDescription': 'typeDescription',
'ViewCategory': 'viewCategory',
'Visible': 'visible',
}
def __init__(self, parent):
super(View, self).__init__(parent)
@property
def AdvancedCVFilters(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.advancedcvfilters.advancedcvfilters.AdvancedCVFilters): An instance of the AdvancedCVFilters class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.advancedcvfilters.advancedcvfilters import AdvancedCVFilters
return AdvancedCVFilters(self)
@property
def AvailableAdvancedFilters(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableadvancedfilters.availableadvancedfilters.AvailableAdvancedFilters): An instance of the AvailableAdvancedFilters class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableadvancedfilters.availableadvancedfilters import AvailableAdvancedFilters
return AvailableAdvancedFilters(self)
@property
def AvailablePortFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableportfilter.availableportfilter.AvailablePortFilter): An instance of the AvailablePortFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableportfilter.availableportfilter import AvailablePortFilter
return AvailablePortFilter(self)
@property
def AvailableProtocolFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableprotocolfilter.availableprotocolfilter.AvailableProtocolFilter): An instance of the AvailableProtocolFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableprotocolfilter.availableprotocolfilter import AvailableProtocolFilter
return AvailableProtocolFilter(self)
@property
def AvailableProtocolStackFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableprotocolstackfilter.availableprotocolstackfilter.AvailableProtocolStackFilter): An instance of the AvailableProtocolStackFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availableprotocolstackfilter.availableprotocolstackfilter import AvailableProtocolStackFilter
return AvailableProtocolStackFilter(self)
@property
def AvailableStatisticFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availablestatisticfilter.availablestatisticfilter.AvailableStatisticFilter): An instance of the AvailableStatisticFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availablestatisticfilter.availablestatisticfilter import AvailableStatisticFilter
return AvailableStatisticFilter(self)
@property
def AvailableTrackingFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availabletrackingfilter.availabletrackingfilter.AvailableTrackingFilter): An instance of the AvailableTrackingFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availabletrackingfilter.availabletrackingfilter import AvailableTrackingFilter
return AvailableTrackingFilter(self)
@property
def AvailableTrafficItemFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availabletrafficitemfilter.availabletrafficitemfilter.AvailableTrafficItemFilter): An instance of the AvailableTrafficItemFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.availabletrafficitemfilter.availabletrafficitemfilter import AvailableTrafficItemFilter
return AvailableTrafficItemFilter(self)
@property
def Data(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.data.data.Data): An instance of the Data class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.data.data import Data
return Data(self)._select()
@property
def DrillDown(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.drilldown.drilldown.DrillDown): An instance of the DrillDown class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.drilldown.drilldown import DrillDown
return DrillDown(self)
@property
def FormulaCatalog(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.formulacatalog.formulacatalog.FormulaCatalog): An instance of the FormulaCatalog class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.formulacatalog.formulacatalog import FormulaCatalog
return FormulaCatalog(self)._select()
@property
def InnerGlobalStats(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.innerglobalstats.innerglobalstats.InnerGlobalStats): An instance of the InnerGlobalStats class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.innerglobalstats.innerglobalstats import InnerGlobalStats
return InnerGlobalStats(self)._select()
@property
def Layer23NextGenProtocolFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23nextgenprotocolfilter.layer23nextgenprotocolfilter.Layer23NextGenProtocolFilter): An instance of the Layer23NextGenProtocolFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23nextgenprotocolfilter.layer23nextgenprotocolfilter import Layer23NextGenProtocolFilter
return Layer23NextGenProtocolFilter(self)
@property
def Layer23ProtocolAuthAccessFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolauthaccessfilter.layer23protocolauthaccessfilter.Layer23ProtocolAuthAccessFilter): An instance of the Layer23ProtocolAuthAccessFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolauthaccessfilter.layer23protocolauthaccessfilter import Layer23ProtocolAuthAccessFilter
return Layer23ProtocolAuthAccessFilter(self)
@property
def Layer23ProtocolPortFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolportfilter.layer23protocolportfilter.Layer23ProtocolPortFilter): An instance of the Layer23ProtocolPortFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolportfilter.layer23protocolportfilter import Layer23ProtocolPortFilter
return Layer23ProtocolPortFilter(self)
@property
def Layer23ProtocolRoutingFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolroutingfilter.layer23protocolroutingfilter.Layer23ProtocolRoutingFilter): An instance of the Layer23ProtocolRoutingFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolroutingfilter.layer23protocolroutingfilter import Layer23ProtocolRoutingFilter
return Layer23ProtocolRoutingFilter(self)
@property
def Layer23ProtocolStackFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolstackfilter.layer23protocolstackfilter.Layer23ProtocolStackFilter): An instance of the Layer23ProtocolStackFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23protocolstackfilter.layer23protocolstackfilter import Layer23ProtocolStackFilter
return Layer23ProtocolStackFilter(self)
@property
def Layer23TrafficFlowDetectiveFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficflowdetectivefilter.layer23trafficflowdetectivefilter.Layer23TrafficFlowDetectiveFilter): An instance of the Layer23TrafficFlowDetectiveFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficflowdetectivefilter.layer23trafficflowdetectivefilter import Layer23TrafficFlowDetectiveFilter
return Layer23TrafficFlowDetectiveFilter(self)
@property
def Layer23TrafficFlowFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficflowfilter.layer23trafficflowfilter.Layer23TrafficFlowFilter): An instance of the Layer23TrafficFlowFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficflowfilter.layer23trafficflowfilter import Layer23TrafficFlowFilter
return Layer23TrafficFlowFilter(self)
@property
def Layer23TrafficItemFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficitemfilter.layer23trafficitemfilter.Layer23TrafficItemFilter): An instance of the Layer23TrafficItemFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficitemfilter.layer23trafficitemfilter import Layer23TrafficItemFilter
return Layer23TrafficItemFilter(self)
@property
def Layer23TrafficPortFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficportfilter.layer23trafficportfilter.Layer23TrafficPortFilter): An instance of the Layer23TrafficPortFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer23trafficportfilter.layer23trafficportfilter import Layer23TrafficPortFilter
return Layer23TrafficPortFilter(self)
@property
def Layer47AppLibraryTrafficFilter(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer47applibrarytrafficfilter.layer47applibrarytrafficfilter.Layer47AppLibraryTrafficFilter): An instance of the Layer47AppLibraryTrafficFilter class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.layer47applibrarytrafficfilter.layer47applibrarytrafficfilter import Layer47AppLibraryTrafficFilter
return Layer47AppLibraryTrafficFilter(self)
@property
def Page(self):
"""DEPRECATED
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.page.page.Page): An instance of the Page class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.page.page import Page
return Page(self)._select()
@property
def Statistic(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.statistic.statistic.Statistic): An instance of the Statistic class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.statistic.statistic import Statistic
return Statistic(self)
    # --- Simple view attributes -------------------------------------------
    # Each property below proxies a single attribute of the server-side view
    # resource through the self._SDM_ATT_MAP name mapping; read-only
    # attributes have no setter.
    @property
    def AutoRefresh(self):
        """DEPRECATED
        Returns
        -------
        - bool: If true, automatically refreshes the statistics values. Default = true
        """
        return self._get_attribute(self._SDM_ATT_MAP['AutoRefresh'])
    @AutoRefresh.setter
    def AutoRefresh(self, value):
        self._set_attribute(self._SDM_ATT_MAP['AutoRefresh'], value)
    @property
    def AutoUpdate(self):
        """
        Returns
        -------
        - bool: If true, automatically refreshes the statistics values. Default = true
        """
        return self._get_attribute(self._SDM_ATT_MAP['AutoUpdate'])
    @AutoUpdate.setter
    def AutoUpdate(self, value):
        self._set_attribute(self._SDM_ATT_MAP['AutoUpdate'], value)
    @property
    def AvailableStatsSelectorColumns(self):
        """
        Returns
        -------
        - list(str): Columns available to be added from Stat Selector Manager
        """
        return self._get_attribute(self._SDM_ATT_MAP['AvailableStatsSelectorColumns'])
    @property
    def Caption(self):
        """
        Returns
        -------
        - str: This is the name that will appear in the GUI stats view window header or in the added view tree from tcl. The caption must be unique.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Caption'])
    @Caption.setter
    def Caption(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Caption'], value)
    @property
    def CsvFileName(self):
        """
        Returns
        -------
        - str: Specifies the file name which is used by the CSV Logging feature. The default value is the caption of the view.
        """
        return self._get_attribute(self._SDM_ATT_MAP['CsvFileName'])
    @CsvFileName.setter
    def CsvFileName(self, value):
        self._set_attribute(self._SDM_ATT_MAP['CsvFileName'], value)
    @property
    def EnableCsvLogging(self):
        """
        Returns
        -------
        - bool: If the CSV Logging feature is enabled the statistics values from a view will be written in a comma separated value format.
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnableCsvLogging'])
    @EnableCsvLogging.setter
    def EnableCsvLogging(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnableCsvLogging'], value)
    @property
    def Enabled(self):
        """
        Returns
        -------
        - bool: If true, enables the view that is created from the tcl script.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
    @Enabled.setter
    def Enabled(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
    @property
    def EnabledStatsSelectorColumns(self):
        """
        Returns
        -------
        - list(str): Columns added from Stat Selector Manager
        """
        return self._get_attribute(self._SDM_ATT_MAP['EnabledStatsSelectorColumns'])
    @EnabledStatsSelectorColumns.setter
    def EnabledStatsSelectorColumns(self, value):
        self._set_attribute(self._SDM_ATT_MAP['EnabledStatsSelectorColumns'], value)
    # --- View state and configuration attributes --------------------------
    # Same accessor pattern as above; OnDemandRefreshView, ReadOnly,
    # StatsSelectorManager, TypeDescription and ViewCategory are read-only.
    @property
    def OnDemandRefreshView(self):
        """
        Returns
        -------
        - bool:
        """
        return self._get_attribute(self._SDM_ATT_MAP['OnDemandRefreshView'])
    @property
    def PageTimeout(self):
        """
        Returns
        -------
        - number: The statistics view page is timed out based on the time specified. default = 25,000 ms
        """
        return self._get_attribute(self._SDM_ATT_MAP['PageTimeout'])
    @PageTimeout.setter
    def PageTimeout(self, value):
        self._set_attribute(self._SDM_ATT_MAP['PageTimeout'], value)
    @property
    def ReadOnly(self):
        """
        Returns
        -------
        - bool: The default views created by the application will have this attribute set to false. Tcl SV created by user has this value set to true. Based on this attribute value, the user is allowed to modify the SV attributes.
        """
        return self._get_attribute(self._SDM_ATT_MAP['ReadOnly'])
    @property
    def StatsSelectorManager(self):
        """
        Returns
        -------
        - bool: Flag that denotes whether Stats Selector Manager is enabled for this view or not
        """
        return self._get_attribute(self._SDM_ATT_MAP['StatsSelectorManager'])
    @property
    def TimeSeries(self):
        """
        Returns
        -------
        - bool: If false, then it displays non-timeseries grid views. If true, displays, timeseries line chart view. default = false (non-timeseries)
        """
        return self._get_attribute(self._SDM_ATT_MAP['TimeSeries'])
    @TimeSeries.setter
    def TimeSeries(self, value):
        self._set_attribute(self._SDM_ATT_MAP['TimeSeries'], value)
    @property
    def TreeViewNodeName(self):
        """
        Returns
        -------
        - str: Displays the name of the tree view node.
        """
        return self._get_attribute(self._SDM_ATT_MAP['TreeViewNodeName'])
    @TreeViewNodeName.setter
    def TreeViewNodeName(self, value):
        self._set_attribute(self._SDM_ATT_MAP['TreeViewNodeName'], value)
    @property
    def Type(self):
        """
        Returns
        -------
        - str(layer23NextGenProtocol | layer23ProtocolAuthAccess | layer23ProtocolPort | layer23ProtocolRouting | layer23ProtocolStack | layer23TrafficFlow | layer23TrafficFlowDetective | layer23TrafficItem | layer23TrafficPort | layer47AppLibraryTraffic | sVReadOnly): The type of view the user wants to create from tcl.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Type'])
    @Type.setter
    def Type(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Type'], value)
    @property
    def TypeDescription(self):
        """
        Returns
        -------
        - str: If true, describes the type
        """
        return self._get_attribute(self._SDM_ATT_MAP['TypeDescription'])
    @property
    def ViewCategory(self):
        """
        Returns
        -------
        - str(ClassicProtocol | L23Traffic | L47Traffic | Mixed | NextGenProtocol | PerSession | Unknown): Returns the category of the view based on the type of statistics displayed by the view.
        """
        return self._get_attribute(self._SDM_ATT_MAP['ViewCategory'])
    @property
    def Visible(self):
        """
        Returns
        -------
        - bool: If true, displays the custom created tcl SVs in the SV tree under TCL Views node.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Visible'])
    @Visible.setter
    def Visible(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Visible'], value)
    def update(self, AutoRefresh=None, AutoUpdate=None, Caption=None, CsvFileName=None, EnableCsvLogging=None, Enabled=None, EnabledStatsSelectorColumns=None, PageTimeout=None, TimeSeries=None, TreeViewNodeName=None, Type=None, Visible=None):
        """Updates view resource on the server.
        Args
        ----
        - AutoRefresh (bool): If true, automatically refreshes the statistics values. Default = true
        - AutoUpdate (bool): If true, automatically refreshes the statistics values. Default = true
        - Caption (str): This is the name that will appear in the GUI stats view window header or in the added view tree from tcl. The caption must be unique.
        - CsvFileName (str): Specifies the file name which is used by the CSV Logging feature. The default value is the caption of the view.
        - EnableCsvLogging (bool): If the CSV Logging feature is enabled the statistics values from a view will be written in a comma separated value format.
        - Enabled (bool): If true, enables the view that is created from the tcl script.
        - EnabledStatsSelectorColumns (list(str)): Columns added from Stat Selector Manager
        - PageTimeout (number): The statistics view page is timed out based on the time specified. default = 25,000 ms
        - TimeSeries (bool): If false, then it displays non-timeseries grid views. If true, displays, timeseries line chart view. default = false (non-timeseries)
        - TreeViewNodeName (str): Displays the name of the tree view node.
        - Type (str(layer23NextGenProtocol | layer23ProtocolAuthAccess | layer23ProtocolPort | layer23ProtocolRouting | layer23ProtocolStack | layer23TrafficFlow | layer23TrafficFlowDetective | layer23TrafficItem | layer23TrafficPort | layer47AppLibraryTraffic | sVReadOnly)): The type of view the user wants to create from tcl.
        - Visible (bool): If true, displays the custom created tcl SVs in the SV tree under TCL Views node.
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # locals() must capture exactly this method's keyword arguments; do not
        # introduce additional local names before this call.
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, AutoRefresh=None, AutoUpdate=None, Caption=None, CsvFileName=None, EnableCsvLogging=None, Enabled=None, EnabledStatsSelectorColumns=None, PageTimeout=None, TimeSeries=None, TreeViewNodeName=None, Type=None, Visible=None):
        """Adds a new view resource on the server and adds it to the container.
        Args
        ----
        - AutoRefresh (bool): If true, automatically refreshes the statistics values. Default = true
        - AutoUpdate (bool): If true, automatically refreshes the statistics values. Default = true
        - Caption (str): This is the name that will appear in the GUI stats view window header or in the added view tree from tcl. The caption must be unique.
        - CsvFileName (str): Specifies the file name which is used by the CSV Logging feature. The default value is the caption of the view.
        - EnableCsvLogging (bool): If the CSV Logging feature is enabled the statistics values from a view will be written in a comma separated value format.
        - Enabled (bool): If true, enables the view that is created from the tcl script.
        - EnabledStatsSelectorColumns (list(str)): Columns added from Stat Selector Manager
        - PageTimeout (number): The statistics view page is timed out based on the time specified. default = 25,000 ms
        - TimeSeries (bool): If false, then it displays non-timeseries grid views. If true, displays, timeseries line chart view. default = false (non-timeseries)
        - TreeViewNodeName (str): Displays the name of the tree view node.
        - Type (str(layer23NextGenProtocol | layer23ProtocolAuthAccess | layer23ProtocolPort | layer23ProtocolRouting | layer23ProtocolStack | layer23TrafficFlow | layer23TrafficFlowDetective | layer23TrafficItem | layer23TrafficPort | layer47AppLibraryTraffic | sVReadOnly)): The type of view the user wants to create from tcl.
        - Visible (bool): If true, displays the custom created tcl SVs in the SV tree under TCL Views node.
        Returns
        -------
        - self: This instance with all currently retrieved view resources using find and the newly added view resources available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # locals() must capture exactly this method's keyword arguments; do not
        # introduce additional local names before this call.
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained view resources in this instance from the server.
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Operates on every resource currently held by this container instance.
        self._delete()
    def find(self, AutoRefresh=None, AutoUpdate=None, AvailableStatsSelectorColumns=None, Caption=None, CsvFileName=None, EnableCsvLogging=None, Enabled=None, EnabledStatsSelectorColumns=None, OnDemandRefreshView=None, PageTimeout=None, ReadOnly=None, StatsSelectorManager=None, TimeSeries=None, TreeViewNodeName=None, Type=None, TypeDescription=None, ViewCategory=None, Visible=None):
        """Finds and retrieves view resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve view resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all view resources from the server.
        Args
        ----
        - AutoRefresh (bool): If true, automatically refreshes the statistics values. Default = true
        - AutoUpdate (bool): If true, automatically refreshes the statistics values. Default = true
        - AvailableStatsSelectorColumns (list(str)): Columns available to be added from Stat Selector Manager
        - Caption (str): This is the name that will appear in the GUI stats view window header or in the added view tree from tcl. The caption must be unique.
        - CsvFileName (str): Specifies the file name which is used by the CSV Logging feature. The default value is the caption of the view.
        - EnableCsvLogging (bool): If the CSV Logging feature is enabled the statistics values from a view will be written in a comma separated value format.
        - Enabled (bool): If true, enables the view that is created from the tcl script.
        - EnabledStatsSelectorColumns (list(str)): Columns added from Stat Selector Manager
        - OnDemandRefreshView (bool):
        - PageTimeout (number): The statistics view page is timed out based on the time specified. default = 25,000 ms
        - ReadOnly (bool): The default views created by the application will have this attribute set to false. Tcl SV created by user has this value set to true. Based on this attribute value, the user is allowed to modify the SV attributes.
        - StatsSelectorManager (bool): Flag that denotes whether Stats Selector Manager is enabled for this view or not
        - TimeSeries (bool): If false, then it displays non-timeseries grid views. If true, displays, timeseries line chart view. default = false (non-timeseries)
        - TreeViewNodeName (str): Displays the name of the tree view node.
        - Type (str(layer23NextGenProtocol | layer23ProtocolAuthAccess | layer23ProtocolPort | layer23ProtocolRouting | layer23ProtocolStack | layer23TrafficFlow | layer23TrafficFlowDetective | layer23TrafficItem | layer23TrafficPort | layer47AppLibraryTraffic | sVReadOnly)): The type of view the user wants to create from tcl.
        - TypeDescription (str): If true, describes the type
        - ViewCategory (str(ClassicProtocol | L23Traffic | L47Traffic | Mixed | NextGenProtocol | PerSession | Unknown)): Returns the category of the view based on the type of statistics displayed by the view.
        - Visible (bool): If true, displays the custom created tcl SVs in the SV tree under TCL Views node.
        Returns
        -------
        - self: This instance with matching view resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # locals() must capture exactly this method's keyword arguments; do not
        # introduce additional local names before this call.
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of view data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the view resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Fetch exactly one resource identified by its REST href.
        return self._read(href)
def DoDrillDownByOption(self, *args, **kwargs):
"""Executes the doDrillDownByOption operation on the server.
doDrillDownByOption(Arg2=number, Arg3=string)href
-------------------------------------------------
- Arg2 (number):
- Arg3 (str):
- Returns str(None):
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('doDrillDownByOption', payload=payload, response_object=None)
def ExportData(self, *args, **kwargs):
"""Executes the exportData operation on the server.
Exports the data seen in a view to a file. Supported formats are .html, .xml, .xls and .txt.
exportData(FilePathName=string)string
-------------------------------------
- FilePathName (str): The path where the exported file should be written.
- Returns str: This can be either a success message or a description of the problem if any error occurred.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('exportData', payload=payload, response_object=None)
def GetAvailableDrillDownOptions(self, *args, **kwargs):
"""Executes the getAvailableDrillDownOptions operation on the server.
getAvailableDrillDownOptions(Arg2=number)list
---------------------------------------------
- Arg2 (number):
- Returns list(str):
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getAvailableDrillDownOptions', payload=payload, response_object=None)
def GetColumnValues(self, *args, **kwargs):
"""Executes the getColumnValues operation on the server.
Retrieves the requested column values.
getColumnValues(Arg2=string)object
----------------------------------
- Arg2 (str): The name of the column for which to retrieve statistics.
- Returns dict(arg1:list[str],arg2:str): An array with the values retrieved.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getColumnValues', payload=payload, response_object=None)
def GetResultsPath(self):
"""Executes the getResultsPath operation on the server.
Gets the path where the results for the current tests are stored.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('getResultsPath', payload=payload, response_object=None)
def GetRowValues(self, *args, **kwargs):
"""Executes the getRowValues operation on the server.
Retrieves the requested row values.
getRowValues(Arg2=string)object
-------------------------------
- Arg2 (str): The label identifying the row for which to retrieve statistics. It is formed from the values of the row label columns concatenated using | delimiter. Row label columns appear with orange or yellow names in the view.
- Returns dict(arg1:list[str],arg2:str): An array with the values retrieved.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getRowValues', payload=payload, response_object=None)
def GetValue(self, *args, **kwargs):
"""Executes the getValue operation on the server.
Retrieves the requested statistical data.
getValue(Arg2=string, Arg3=string)string
----------------------------------------
- Arg2 (str): The label identifying the row for which to retrieve statistics. It is formed from the values of the row label columns concatenated using | delimiter. Row label columns appear with orange or yellow names in the view.
- Arg3 (str): The name of the column for which to retrieve statistics.
- Returns str: The retrieved value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('getValue', payload=payload, response_object=None)
def Refresh(self):
"""Executes the refresh operation on the server.
Refreshes the existing values in the view with the new values. If the view is NGPF on demand, the refresh will get new values for all NGPF on demand views.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('refresh', payload=payload, response_object=None)
def RestoreToDefaults(self):
"""Executes the restoreToDefaults operation on the server.
NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('restoreToDefaults', payload=payload, response_object=None)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
f8bbd93b19a00ccaa1785f21de10d7ee85c0d2f2 | eeea27393aa91ded7452f8ba3b9e59de34b352e3 | /tests/test_fetch_branch_descriptions.py | 0c9edc9ee8d800b318df303b75bfb9e8f496f979 | [
"MIT"
] | permissive | timbrel/GitSavvy | 021654f9dba690601f2a2ea4696230ac3c14dc88 | 7f6eae583ba4a38749b14a6e348c6d4fcf6811e8 | refs/heads/master | 2023-09-05T20:02:14.775466 | 2023-07-18T11:17:15 | 2023-07-18T11:17:15 | 29,417,074 | 174 | 32 | MIT | 2023-07-29T15:56:38 | 2015-01-18T05:40:08 | Python | UTF-8 | Python | false | false | 1,247 | py | from textwrap import dedent
from unittesting import DeferrableTestCase
from GitSavvy.tests.mockito import when
from GitSavvy.tests.parameterized import parameterized as p
from GitSavvy.core.git_command import GitCommand
# Each example pairs raw `branch.<name>.description <value>` output (as read
# from git config) with the expected mapping of branch name -> description
# subject (first line only) produced by the parser under test.
examples = [
    (
        dedent("""\
branch.status-bar-updater.description One\\nTwo
branch.revert-o-behavior.description Another branch.asd.description
branch.opt-fetching-descriptions.description This is the subject
And here is more text
and even more
branch.description.description Another description
""".rstrip()),
        {
            "status-bar-updater": "One\\nTwo",
            "revert-o-behavior": "Another branch.asd.description",
            "opt-fetching-descriptions": "This is the subject",
            "description": "Another description"
        }
    ),
]
class TestFetchBranchDescriptions(DeferrableTestCase):
    # Parameterized over `examples`: each case supplies canned git output and
    # the parsed result we expect back.
    @p.expand(examples)
    def test_description_subjects(self, git_output, expected):
        # Stub out repo discovery and the underlying `git config` call so the
        # parser only sees the canned output from `examples`.
        test = GitCommand()
        when(test).get_repo_path().thenReturn("probably/here")
        when(test, strict=False).git("config", ...).thenReturn(git_output)
        self.assertEqual(expected, test.fetch_branch_description_subjects())
| [
"herr.kaste@gmail.com"
] | herr.kaste@gmail.com |
e7c2e47da66a553d505b55a3d937d30f2c820114 | 0fffdc47f9876c665926d1b67e1cf8ecb0e735d7 | /awesimsoss/generate_darks.py | 1cf8b7919ce3f06119722ebc9727a79957aa07b5 | [
"MIT"
] | permissive | nespinoza/awesimsoss | 439c0857bc38b6f307f4dfd705b667c880b6fa0a | 24455b703f846a66cd2c3f6f7edc3960c129e031 | refs/heads/master | 2020-12-27T22:17:39.027475 | 2020-04-07T21:35:23 | 2020-04-07T21:35:23 | 238,078,808 | 0 | 0 | MIT | 2020-04-07T21:35:24 | 2020-02-03T22:56:39 | Jupyter Notebook | UTF-8 | Python | false | false | 7,973 | py | #! /usr/bin/env python
import os
import numpy as np
import astropy.io.fits as fits
from . import noise_simulation as ng
def add_dark_current(ramp, seed, gain, darksignal):
    """Add accumulated dark current to every frame of a ramp, in place.

    Parameters
    ----------
    ramp: sequence
        The array of ramp images, shaped [nframes, nrows, ncols]
    seed: int
        The seed for the dark signal
    gain: float
        The detector gain
    darksignal: sequence
        A 2D map of the dark signal to project onto the ramp

    Returns
    -------
    np.ndarray
        The dark signal ramp
    """
    # Seed once so every frame draws from the same reproducible stream
    np.random.seed(seed)
    nframes = ramp.shape[0]

    # Dark charge integrates up the ramp, so keep a running total across frames
    accumulated = darksignal * 0.
    for frame in range(nframes):
        accumulated = accumulated + np.random.poisson(darksignal) / gain
        ramp[frame, :, :] = ramp[frame, :, :] + accumulated
    return ramp
def make_exposure(nints, ngrps, darksignal, gain, pca0_file, noise_seed=None,
                  dark_seed=None, offset=500):
    """
    Make a simulated exposure with no source signal
    Parameters
    ----------
    nints: int
        The number of integrations
    ngrps: int
        The number of groups per integration
    darksignal: sequence
        A dark frame
    gain: float
        The gain on the detector
    pca0_file: str
        The path to the PCA-zero file
    noise_seed: int
        The seed for the generated noise
    dark_seed: int
        The seed for the generated dark
    offset: int
        The pedestal offset
    Returns
    -------
    np.ndarray
        A simulated ramp of darks
    """
    # Nothing to simulate without at least one integration and group
    if nints < 1 or ngrps < 1:
        return None
    # NOTE(review): `if not noise_seed` treats an explicit seed of 0 as unset
    # and replaces it with a random one — confirm that is intended.
    if not noise_seed:
        noise_seed = 7+int(np.random.uniform()*4000000000.)
    if not dark_seed:
        dark_seed = 5+int(np.random.uniform()*4000000000.)
    np.random.seed(dark_seed)
    # Make empty data array: all groups of all integrations stacked on axis 0
    nrows, ncols = darksignal.shape
    simulated_data = np.zeros([nints*ngrps,nrows,ncols], dtype=np.float32)
    # Define some constants (noise-generator amplitudes; bias_amp and pca0_amp
    # are currently disabled, see the commented values below)
    pedestal = 18.30
    c_pink = 9.6
    u_pink = 3.2
    acn = 2.0
    bias_amp = 0.
    #bias_amp = 5358.87
    #bias_offset = 20944.06
    pca0_amp = 0.
    rd_noise = 12.95
    dark_current = 0.0
    dc_seed = dark_seed
    bias_offset = offset*gain
    # Define the HXRGN instance to make a SUSBSTRIP256 array
    #(in detector coordinates)
    noisecube = ng.HXRGNoise(naxis1=nrows, naxis2=ncols, naxis3=ngrps,
                             pca0_file=pca0_file, x0=0, y0=0, det_size=2048,
                             verbose=False)
    # iterate over integrations; each integration gets its own noise seed
    for loop in range(nints):
        seed1 = noise_seed+24*int(loop)
        ramp = noisecube.mknoise(c_pink=c_pink, u_pink=u_pink,
                                 bias_amp=bias_amp, bias_offset=bias_offset,
                                 acn=acn, pca0_amp=pca0_amp, rd_noise=rd_noise,
                                 pedestal=pedestal, dark_current=dark_current,
                                 dc_seed=dc_seed, noise_seed=seed1, gain=gain)
        # A single group comes back 2D; promote it to a 1-frame ramp
        if len(ramp.shape)==2:
            ramp = ramp[np.newaxis,:,:]
        # Transpose and flip both spatial axes — presumably converting from
        # detector to science orientation; confirm against the instrument docs
        ramp = np.transpose(ramp,(0,2,1))
        ramp = ramp[::,::-1,::-1]
        ramp = add_dark_current(ramp, dc_seed, gain, darksignal)
        simulated_data[loop*ngrps:(loop+1)*ngrps,:,:] = np.copy(ramp)
        ramp = 0
    return simulated_data
def make_photon_yield(photon_yield, orders):
    """Generate a per-pixel photon-yield map combined over all orders.

    The yield of each order is weighted by that order's median signal; pixels
    with no signal in any order default to a yield of 1.

    Parameters
    ----------
    photon_yield: sequence
        The calculated photon yield at each pixel, shaped [norders, nrows, ncols]
    orders: sequence
        An array of the median image of each order, shaped [norders, nrows, ncols]

    Returns
    -------
    np.ndarray
        The array containing the combined photon yield map
    """
    # Accumulate the signal-weighted yield and the total signal per pixel
    dims = orders.shape
    weighted = np.zeros((dims[1], dims[2]), dtype=np.float32)
    total = np.zeros((dims[1], dims[2]), dtype=np.float32)
    for n in range(dims[0]):
        weighted = weighted + photon_yield[n, :, :] * orders[n, :, :]
        total = total + orders[n, :, :]

    # Weighted mean yield. Divide only where there is signal (avoids the
    # divide-by-zero warnings of a plain ratio) and default empty pixels to 1,
    # matching the previous post-hoc patch of zero-signal pixels.
    pyimage = np.ones_like(weighted)
    np.divide(weighted, total, out=pyimage, where=total != 0.)
    return pyimage
def add_signal(signals, cube, pyimage, frametime, gain, zodi, zodi_scale,
               photon_yield=False):
    """
    Add the science signal to the generated noise ramp

    Parameters
    ----------
    signals: sequence
        The science frames, same shape as cube
    cube: sequence
        The generated dark ramp
    pyimage: sequence
        The photon yield map
    frametime: float
        The number of seconds per frame
    gain: float
        The detector gain
    zodi: sequence
        The zodiacal background image
    zodi_scale: float
        The scale factor for the zodi background
    photon_yield: bool
        If True, draw extra Poisson counts pixel by pixel for the excess
        photon yield instead of folding the yield into the Poisson rate

    Returns
    -------
    np.ndarray
        The ramp with the science signal and background added
    """
    # Get the data dimensions
    dims1 = cube.shape
    dims2 = signals.shape
    if dims1 != dims2:
        raise ValueError(dims1, "not equal to", dims2)

    # Ramp of accumulated signal counts, built up group by group
    newcube = cube.copy() * 0.

    # The background is assumed to be in electrons/second/pixel, not ADU/s/pixel.
    background = zodi * zodi_scale * frametime

    # Iterate over each group
    for ngroup in range(dims1[0]):
        framesignal = signals[ngroup, :, :] * gain * frametime

        # Add photon yield
        if photon_yield:
            newvalues = np.random.poisson(framesignal)
            target = pyimage - 1.
            for row in range(dims1[1]):
                for col in range(dims1[2]):
                    if target[row, col] > 0.:
                        # BUG FIX: the original reused the group index `n` as
                        # this counter, corrupting the ramp accumulation below.
                        counts = int(newvalues[row, col])
                        values = np.random.poisson(target[row, col], size=counts)
                        newvalues[row, col] = newvalues[row, col] + np.sum(values)
            newvalues = newvalues + np.random.poisson(background)
        # Or don't
        else:
            vals = np.abs(framesignal * pyimage + background)
            newvalues = np.random.poisson(vals)

        # Each group accumulates its counts on top of the previous group
        if ngroup == 0:
            newcube[ngroup, :, :] = newvalues
        else:
            newcube[ngroup, :, :] = newcube[ngroup - 1, :, :] + newvalues

    newcube = cube + newcube / gain
    return newcube
def non_linearity(cube, nonlinearity, offset=0):
    """
    Add nonlinearity to the ramp

    Each frame value f (minus *offset*) is mapped to
    f * (1 + sum_n c_n * f**(n+1)) with c_n = nonlinearity[n], and the offset
    is added back afterwards.

    Parameters
    ----------
    cube: sequence
        The ramp with no non-linearity, shaped [ngroups, nrows, ncols]
    nonlinearity: sequence
        The non-linearity coefficient images, shaped [ncoeffs, nrows, ncols]
    offset: int
        The non-linearity offset

    Returns
    -------
    np.ndarray
        The ramp with the added non-linearity

    Raises
    ------
    ValueError
        If the image dimensions of cube and nonlinearity do not match.
    """
    dims1 = nonlinearity.shape
    dims2 = cube.shape
    # Both spatial axes must agree. (The original check compared the row axis
    # twice, so column mismatches slipped through.)
    if (dims1[1] != dims2[1]) or (dims1[2] != dims2[2]):
        raise ValueError("Image dimensions of cube {} and nonlinearity {} "
                         "do not match".format(dims2, dims1))

    newcube = cube - offset
    for k in range(dims2[0]):
        frame = np.squeeze(np.copy(newcube[k, :, :]))
        # Accumulate the polynomial terms c_n * frame**(n+1)
        poly = frame * 0.
        for ncoeff in range(dims1[0] - 1, -1, -1):
            poly = poly + nonlinearity[ncoeff, :, :] * np.power(frame, ncoeff + 1)
        newcube[k, :, :] = frame * (1. + poly)

    return newcube + offset
def add_pedestal(cube, pedestal, offset=500):
    """
    Add a pedestal to the ramp

    Parameters
    ----------
    cube: sequence
        The ramp with no pedestal, shaped [nframes, nrows, ncols]
    pedestal: sequence
        The pedestal image to add to each frame of the ramp
    offset: int
        The pedestal offset; values other than the nominal 500 shift the
        pedestal by (offset - 500)

    Returns
    -------
    np.ndarray
        The ramp with the added pedestal, as uint16
    """
    # Shift the pedestal by the requested offset (500 is the nominal level)
    shifted = pedestal + (offset - 500.)

    # Broadcasting adds the 2D pedestal to every frame at once, replacing the
    # per-frame loop. The float32 intermediate is kept so the final uint16
    # truncation is bit-identical to the previous implementation.
    return (cube + shifted).astype(np.float32).astype(np.uint16)
| [
"jfilippazzo@stsci.edu"
] | jfilippazzo@stsci.edu |
057491a1237ffc4bef99c167ba0dcb7674f14ccd | 45ab4c22d918dc4390572f53c267cf60de0d68fb | /src/Analysis/Engine/Impl/Typeshed/third_party/2and3/requests/sessions.pyi | c01b5e15dd227f974e24e619ea84e3f29113ac3c | [
"MIT",
"Apache-2.0"
] | permissive | sourcegraph/python-language-server | 580a24fd15fe9d4abeb95e9333d61db1c11a2670 | 64eae156f14aa14642afcac0e7edaf5d7c6d1a1c | refs/heads/master | 2023-04-09T21:17:07.555979 | 2018-12-06T23:25:05 | 2018-12-06T23:25:05 | 155,174,256 | 2 | 2 | Apache-2.0 | 2018-10-29T08:06:49 | 2018-10-29T08:06:49 | null | UTF-8 | Python | false | false | 5,198 | pyi | # Stubs for requests.sessions (Python 3)
from typing import Any, Union, List, MutableMapping, Text, Optional, IO, Tuple, Callable, Iterable
from . import adapters
from . import auth as _auth
from . import compat
from . import cookies
from . import models
from .models import Response
from . import hooks
from . import utils
from . import exceptions
from .packages.urllib3 import _collections
from . import structures
from . import adapters
from . import status_codes
# Re-export names from sibling modules under the same local aliases that
# requests/sessions.py creates at import time, so the stub mirrors the
# runtime module's attribute layout.
BaseAdapter = adapters.BaseAdapter
OrderedDict = compat.OrderedDict
cookiejar_from_dict = cookies.cookiejar_from_dict
extract_cookies_to_jar = cookies.extract_cookies_to_jar
RequestsCookieJar = cookies.RequestsCookieJar
merge_cookies = cookies.merge_cookies
Request = models.Request
PreparedRequest = models.PreparedRequest
DEFAULT_REDIRECT_LIMIT = models.DEFAULT_REDIRECT_LIMIT
default_hooks = hooks.default_hooks
dispatch_hook = hooks.dispatch_hook
to_key_val_list = utils.to_key_val_list
default_headers = utils.default_headers
to_native_string = utils.to_native_string
TooManyRedirects = exceptions.TooManyRedirects
InvalidSchema = exceptions.InvalidSchema
ChunkedEncodingError = exceptions.ChunkedEncodingError
ContentDecodingError = exceptions.ContentDecodingError
RecentlyUsedContainer = _collections.RecentlyUsedContainer
CaseInsensitiveDict = structures.CaseInsensitiveDict
HTTPAdapter = adapters.HTTPAdapter
requote_uri = utils.requote_uri
get_environ_proxies = utils.get_environ_proxies
get_netrc_auth = utils.get_netrc_auth
should_bypass_proxies = utils.should_bypass_proxies
get_auth_from_url = utils.get_auth_from_url
codes = status_codes.codes
REDIRECT_STATI = models.REDIRECT_STATI
REDIRECT_CACHE_SIZE = ... # type: Any
# Module-level helpers that combine per-request settings/hooks with
# session-level defaults; dict_class selects the resulting mapping type.
def merge_setting(request_setting, session_setting, dict_class=...): ...
def merge_hooks(request_hooks, session_hooks, dict_class=...): ...
class SessionRedirectMixin:
    # Redirect-handling behavior mixed into Session.
    def resolve_redirects(self, resp, req, stream=..., timeout=..., verify=..., cert=...,
                          proxies=...): ...
    def rebuild_auth(self, prepared_request, response): ...
    def rebuild_proxies(self, prepared_request, proxies): ...
# Private type aliases used throughout this stub for request bodies and
# response-hook mappings.
_Data = Union[None, bytes, MutableMapping[Text, Text], IO]
_Hook = Callable[[Response], Any]
_Hooks = MutableMapping[Text, List[_Hook]]
_HooksInput = MutableMapping[Text, Union[Iterable[_Hook], _Hook]]
class Session(SessionRedirectMixin):
    # Typed mirror of requests.sessions.Session. Attribute types use
    # `# type:` comments because this stub lives in typeshed's 2and3 tree
    # and must stay parseable by Python 2 tooling.
    __attrs__ = ... # type: Any
    headers = ... # type: MutableMapping[Text, Text]
    auth = ... # type: Union[None, Tuple[Text, Text], _auth.AuthBase, Callable[[Request], Request]]
    proxies = ... # type: MutableMapping[Text, Text]
    hooks = ... # type: _Hooks
    params = ... # type: Union[bytes, MutableMapping[Text, Text]]
    stream = ... # type: bool
    verify = ... # type: Union[None, bool, Text]
    cert = ... # type: Union[None, Text, Tuple[Text, Text]]
    max_redirects = ... # type: int
    trust_env = ... # type: bool
    cookies = ... # type: Union[RequestsCookieJar, MutableMapping[Text, Text]]
    adapters = ... # type: MutableMapping
    redirect_cache = ... # type: RecentlyUsedContainer
    def __init__(self) -> None: ...
    def __enter__(self) -> 'Session': ...
    def __exit__(self, *args) -> None: ...
    def prepare_request(self, request): ...
    # `request` carries the full parameter set; the per-verb helpers below
    # are typed with the subset of parameters they expose directly.
    def request(self, method: str, url: str,
                params: Union[None, bytes, MutableMapping[Text, Text]] = ...,
                data: _Data = ...,
                headers: Optional[MutableMapping[Text, Text]] = ...,
                cookies: Union[None, RequestsCookieJar, MutableMapping[Text, Text]] = ...,
                files: Optional[MutableMapping[Text, IO]] = ...,
                auth: Union[None, Tuple[Text, Text], _auth.AuthBase, Callable[[Request], Request]] = ...,
                timeout: Union[None, float, Tuple[float, float]] = ...,
                allow_redirects: Optional[bool] = ...,
                proxies: Optional[MutableMapping[Text, Text]] = ...,
                hooks: Optional[_HooksInput] = ...,
                stream: Optional[bool] = ...,
                verify: Union[None, bool, Text] = ...,
                cert: Union[Text, Tuple[Text, Text], None] = ...,
                json: Optional[MutableMapping] = ...,
                ) -> Response: ...
    def get(self, url: Union[Text, bytes], **kwargs) -> Response: ...
    def options(self, url: Union[Text, bytes], **kwargs) -> Response: ...
    def head(self, url: Union[Text, bytes], **kwargs) -> Response: ...
    def post(self, url: Union[Text, bytes], data: _Data = ..., json: Optional[MutableMapping] = ..., **kwargs) -> Response: ...
    def put(self, url: Union[Text, bytes], data: _Data = ..., **kwargs) -> Response: ...
    def patch(self, url: Union[Text, bytes], data: _Data = ..., **kwargs) -> Response: ...
    def delete(self, url: Union[Text, bytes], **kwargs) -> Response: ...
    def send(self, request, **kwargs): ...
    def merge_environment_settings(self, url, proxies, stream, verify, cert): ...
    def get_adapter(self, url): ...
    def close(self) -> None: ...
    def mount(self, prefix:
              Union[Text, bytes],
              adapter: BaseAdapter) -> None: ...
def session() -> Session: ...
| [
"alsher@microsoft.com"
] | alsher@microsoft.com |
cc94128aa202a02289d2a1af9bf3cdb56f1c5360 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Downloading Files.py | 1d0d790a38a971745fd005fe0779c1009c3cd171 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c75100f4f9546784ea8796f60c014b05c1a6d85dc6339a653778693693dfda95
size 570
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
3cf9f47ccdd76898354a87cd03513c3853c0f2f5 | 60107fe4be58b8a96304ef1128c1514943efa2f9 | /19.py | 24b945692299a9de4f40994b0f287d7f28c65b75 | [] | no_license | Aidana172003/TSIS6 | 5293d23a2827644aaea2962f964e71efe7a42708 | 3451f73507171782d558a75a1bd46be7ecda2914 | refs/heads/main | 2023-06-12T06:35:46.043423 | 2021-07-08T07:23:30 | 2021-07-08T07:23:30 | 384,037,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | def test(a):
def add(b):
nonlocal a
a+=1
return a+b
return add
func=test(4)
print(func(4)) | [
"noreply@github.com"
] | Aidana172003.noreply@github.com |
b282ae4a8df3eb798acfcdf196f7e3f240860174 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/identity/azure-identity/tests/managed-identity-live/test_cloud_shell.py | aa125848a2efed089f300301bbc93f86011acc2c | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,113 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.core import PipelineClient
from azure.core.pipeline.policies import ContentDecodePolicy, HttpLoggingPolicy, RedirectPolicy, RetryPolicy
from azure.identity import ManagedIdentityCredential
@pytest.mark.cloudshell
def test_cloud_shell_live(cloud_shell):
    """Acquire a managed-identity token in Cloud Shell and prove it works."""
    token = ManagedIdentityCredential().get_token("https://vault.azure.net")

    # azure-keyvault-secrets cannot authenticate in Cloud Shell (the MSI
    # endpoint there does not support AADv2 scopes), so validate the token by
    # issuing a hand-built pipeline request against the Key Vault instead.
    pipeline_policies = [ContentDecodePolicy(), RedirectPolicy(), RetryPolicy(), HttpLoggingPolicy()]
    client = PipelineClient(cloud_shell["vault_url"], policies=pipeline_policies)
    request = client.get(
        "secrets",
        headers={"Authorization": "Bearer " + token.token},
        params={"api-version": "7.0"},
    )
    with client:
        client._pipeline.run(request)
| [
"noreply@github.com"
] | Azure.noreply@github.com |
0e6fd93f5f79c5c1a20e94ccb2adab320a1de061 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/PasswordSub.py | 5ce89e5c41ce5e79cabb90ced4794759293b987b | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,225 | py | import sys
import ael
import time
import string
ael_variables = []
def check_password(user):
    """Mail the affected user about an initial or expiring ADS password.

    Called from the UserLog subscription callback; ``user`` is a UserLog
    record whose ``updat_usrnbr`` points at the ael User being checked.
    """
    time.sleep(6)
    mailto = user.updat_usrnbr.userid
    msg = 'To change a password go to File -> Preferences -> Passwords -> ADS.'
    #if user.updat_usrnbr.userid != ael.User[user.updat_usrnbr].updat_usrnbr.userid.
    if ael.User[user.updat_usrnbr.userid].add_info('PasswordResetDate') == '':
        # No reset date recorded yet -- treat this as a freshly issued password.
        subj = 'Initial Password'
        ael.sendmessage(mailto, subj, msg)
        ael.log(mailto + subj)
    else:
        # Check how old the password is relative to the 30 day limit.
        ResetDate = ael.date(user.updat_usrnbr.add_info('PasswordResetDate'))
        LatestDate = ael.date_from_time(user.creat_time)
        # Hoisted: the age was computed three times before.
        days = ResetDate.days_between(LatestDate)
        if 25 <= days <= 30:
            # BUG FIX: days is an integer (it is compared numerically above);
            # concatenating it to a str raised TypeError -- convert explicitly.
            subj = 'Password will expire in :' + str(days)
            ael.sendmessage(mailto, subj, msg)
            ael.log(mailto + subj)
        if days > 30:
            subj = 'Your password has expired and your userid will be locked please change password now'
            ael.sendmessage(mailto, subj, msg)
            # NOTE(review): the actual locking below is commented out, so the
            # warning above is not enforced -- confirm intent.
            # thisuser = ael.User[mailto].clone()
            # thisuser.inactive = 1
            # thisuser.commit()
            ael.log(mailto + subj)
def start():
    """Subscribe userlog_update_cb to UserLog table events (ael framework)."""
    #Start subscription on the userlog table
    print "Starting UserLog subscription"
    ael.UserLog.subscribe(userlog_update_cb)
def stop():
    """Remove the UserLog subscription installed by start()."""
    print "Stopping userlog subscription"
    ael.UserLog.unsubscribe(userlog_update_cb)
def userlog_update_cb(obj, userlog, arg, event):
    """UserLog table callback: run the password check on login/logoff rows."""
    #Check Password if has not expired
    if event in ['insert', 'update'] and userlog.type in ['Login', 'Logoff']:
# print obj, userlog.pp(), arg, event
        check_password(userlog)
def ael_main(ael_dict):
    # GUI-client entry point required by the ael scripting framework.
    # BUG FIX: the def had no body (a SyntaxError as dumped); the module-level
    # block below already starts the subscription on import, so nothing to do.
    pass
if __name__=="__main__":
    # Called from command line, connect first
    # ael.connect('sun23:7771', 'FRED', 'secret', 'TimeSeriesSample')
    start()
    ael.main_loop()
else:
    # Called from GUI client, already connected; importing this module is
    # enough to start the UserLog subscription.
    start()
    #stop()
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
7e15206079e2328ba961416fe868740e2f8a4dbb | ae29491fdfa0ef139e2146e3cdb088781efd1ff0 | /lang.py | 386349f17dc3daf551943a692231a5b254605dfc | [] | no_license | PyLamGR/Aurora-Bot | bcaa131811d7d05dc6bdb909f5f7b7f6f0ca250c | 1904eff6133765568f2e72c076827e3d8d6f4e8e | refs/heads/master | 2020-03-22T19:39:05.878137 | 2018-07-13T11:08:52 | 2018-07-13T11:08:52 | 140,542,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from configs import lang
# TODO: Automate this process
# https://stackoverflow.com/questions/18090672/convert-dictionary-entries-into-variables-python
# Re-export selected language strings as module-level constants so callers
# can write `from lang import TAG_REQUIRED` instead of indexing the dict.
TAG_REQUIRED = lang['TAG_REQUIRED']
MEMBER_NOT_FOUND = lang['MEMBER_NOT_FOUND']
UNKNOWN_ERROR = lang['UNKNOWN_ERROR']
#
# locals().update(lang)
#
# for name, value in locals().copy().items():
#     print(name, value)
# __dict__ = lang
# NOTE(review): assigning to the global name `__dict__` does NOT replace the
# module's attribute dictionary -- it just creates an ordinary global called
# `__dict__`. Looks like leftover experimentation; confirm before relying on it.
__dict__ = ['HEY_ALL']
| [
"wckdawe@gmail.com"
] | wckdawe@gmail.com |
c553c8c848f14f4ef73947ba0fa585fe765d4784 | a1afebeb04af3e9bbe5e9cf8a2468d8f1a99f9cc | /import-pipelines/LaserChron/setup.py | 4b3eecd661b858b658f0d15b18d3e96eb5051943 | [] | no_license | yeshancqcq/Sparrow | 0f4527740d0f1cca535e7b59384bcbe9ccaa1682 | dcfd2eeacc524ae752e6e68ea84fa4e58645337d | refs/heads/master | 2020-05-29T22:41:24.235748 | 2019-07-29T20:42:01 | 2019-07-29T20:42:01 | 189,416,026 | 0 | 0 | null | 2019-05-30T13:09:46 | 2019-05-30T13:09:46 | null | UTF-8 | Python | false | false | 235 | py | from setuptools import setup
# Minimal packaging definition for the LaserChron import-pipeline plugin.
setup(
    name='sparrow_import_laserchron',
    version='0.1',
    # Map the importable package name onto its source directory.
    package_dir={'sparrow_import_laserchron': 'sparrow_import_laserchron'},
    install_requires=['sqlalchemy', 'pandas', 'xlrd', 'click']
)
| [
"dev@davenquinn.com"
] | dev@davenquinn.com |
d2a233ab84eb7fdc73b95443c1c2f1fabfdbcacc | 4054fde482f06ba5566ff88ff7c65b7221f4fd91 | /forml/flow/_code/compiler.py | bb259a1f4a1644ef19cecd1802c15d986858773d | [
"Apache-2.0"
] | permissive | formlio/forml | e54278c2cc76cdfaf9d4feb405bd1a98c6dcd3e6 | 373bf4329338a9056e43966b8cfa458529ed2817 | refs/heads/main | 2023-06-07T21:38:34.952453 | 2023-05-28T21:53:47 | 2023-05-28T21:53:47 | 310,066,051 | 108 | 15 | Apache-2.0 | 2023-05-28T19:38:16 | 2020-11-04T17:04:13 | Python | UTF-8 | Python | false | false | 11,993 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Runtime symbols compilation.
"""
import collections
import functools
import itertools
import logging
import typing
import uuid
from .. import _exception
from .._graph import atomic, span
from . import target
from .target import system, user
if typing.TYPE_CHECKING:
from forml import flow
from forml.io import asset
LOGGER = logging.getLogger(__name__)
class Table(span.Visitor, typing.Iterable):
    """Dynamic builder of the runtime symbols. Table uses node UIDs and GIDs where possible as instruction keys."""
    class Linkage:
        """Structure for registering instruction dependency tree as relations between target (receiving) instruction
        and its upstream dependency instructions representing its positional arguments.
        """
        def __init__(self):
            # Absolute args live at fixed positional offsets; prefixed args are
            # system-injected and consumed in reverse insertion order.
            self._absolute: dict[uuid.UUID, list[typing.Optional[uuid.UUID]]] = collections.defaultdict(list)
            self._prefixed: dict[uuid.UUID, list[typing.Optional[uuid.UUID]]] = collections.defaultdict(list)
        def __getitem__(self, instruction: uuid.UUID) -> typing.Sequence[uuid.UUID]:
            # Reversal restores the logical prepend order (see prepend()).
            return tuple(itertools.chain(reversed(self._prefixed[instruction]), self._absolute[instruction]))
        @property
        def leaves(self) -> typing.AbstractSet[uuid.UUID]:
            """Return the leaf nodes that are not anyone's dependency.
            Returns:
                leaf nodes.
            """
            parents = {i for a in itertools.chain(self._absolute.values(), self._prefixed.values()) for i in a}
            children = set(self._absolute).union(self._prefixed).difference(parents)
            assert children, 'Not acyclic'
            return children
        def insert(self, instruction: uuid.UUID, argument: uuid.UUID, index: typing.Optional[int] = None) -> None:
            """Store given argument as a positional parameter of given instruction at absolute offset given by index.
            Index can be omitted for single-argument instructions.
            Args:
                instruction: Target (receiver) instruction.
                argument: Positional argument to be stored.
                index: Position offset of given argument.
            """
            args = self._absolute[instruction]
            argcnt = len(args)
            if index is None:
                assert argcnt <= 1, f'Index required for multiarg ({argcnt}) instruction'
                index = 0
            assert index >= 0, 'Invalid positional index'
            if argcnt <= index:
                # Pad with None placeholders up to the requested offset.
                args.extend([None] * (index - argcnt + 1))
            assert not args[index], 'Link collision'
            args[index] = argument
        def update(self, node: 'flow.Worker', getter: typing.Callable[[int], uuid.UUID]) -> None:
            """Register given node (its eventual functor) as an absolute positional argument of all of its subscribers.
            For multi-output nodes the output needs to be passed through Getter instructions that are extracting
            individual items.
            Args:
                node: Worker node (representing its actual functor) as an positional argument of its subscribers.
                getter: Callback for creating a Getter instruction for given positional index and returning its key.
            """
            if node.szout == 1:
                for subscriber in node.output[0]:
                    self.insert(subscriber.node.uid, node.uid, subscriber.port)
            else:
                # One Getter per output port extracts that item from the tuple.
                for index, output in enumerate(node.output):
                    source = getter(index)
                    self.insert(source, node.uid)
                    for subscriber in output:
                        self.insert(subscriber.node.uid, source, subscriber.port)
        def prepend(self, instruction: uuid.UUID, argument: uuid.UUID) -> None:
            """In contrast to the absolute positional arguments we can potentially prepend these with various system
            arguments that should eventually prefix the absolute ones.
            Here we just append these to a list but during iteration we read them in reverse to reflect the prepend
            order.
            Args:
                instruction: Key of the target (receiver) instruction.
                argument: Argument (instruction key) to be prepended to the list of the absolute arguments.
            """
            self._prefixed[instruction].append(argument)
    class Index:
        """Mapping of the stored instructions. Same instruction might be stored under multiple keys."""
        def __init__(self):
            self._instructions: dict[uuid.UUID, 'flow.Instruction'] = {}
        def __contains__(self, key: uuid.UUID) -> bool:
            return key in self._instructions
        def __getitem__(self, key: uuid.UUID) -> 'flow.Instruction':
            return self._instructions[key]
        @property
        def instructions(self) -> 'typing.Iterator[tuple[flow.Instruction, typing.Iterator[uuid.UUID]]]':
            """Iterator over tuples of instructions plus iterator of its keys.
            Returns:
                Instruction-keys tuples iterator.
            """
            # NOTE(review): groupby only groups *consecutive* equal elements --
            # this relies on dict insertion order keeping an instruction's
            # aliases adjacent (set() registers them back to back); confirm.
            return itertools.groupby(self._instructions.keys(), self._instructions.__getitem__)
        def set(self, instruction: 'flow.Instruction', key: typing.Optional[uuid.UUID] = None) -> uuid.UUID:
            """Store given instruction by provided or generated key.
            It is an error to store instruction with existing key (to avoid, use the reset method).
            Args:
                instruction: Runtime instruction to be stored.
                key: Optional key to be used as instruction reference.
            Returns:
                Key associated with the instruction.
            """
            if not key:
                key = uuid.uuid4()
            assert key not in self, 'Instruction collision'
            self._instructions[key] = instruction
            return key
        def reset(self, orig: uuid.UUID, new: typing.Optional[uuid.UUID] = None) -> uuid.UUID:
            """Re-register instruction under given key to a new key (provided or generate).
            Args:
                orig: Original key of the instruction to be re-registered.
                new: Optional new key to re-register the instruction with.
            Returns:
                New key associated with the instruction.
            """
            instruction = self._instructions[orig]
            del self._instructions[orig]
            return self.set(instruction, new)
    def __init__(self, assets: typing.Optional['asset.State']):
        self._assets: typing.Optional['asset.State'] = assets
        self._linkage: Table.Linkage = self.Linkage()
        self._index: Table.Index = self.Index()
        # Single shared Committer instruction key, created lazily in add().
        self._committer: typing.Optional[uuid.UUID] = None
    def __iter__(self) -> typing.Iterator['flow.Symbol']:
        def merge(
            value: typing.Iterable[typing.Optional[uuid.UUID]], element: typing.Iterable[typing.Optional[uuid.UUID]]
        ) -> typing.Iterable[uuid.UUID]:
            """Merge two iterables with at most one of them having non-null value on each offset into single iterable
            with this non-null values picked.
            Args:
                value: Left iterable.
                element: Right iterable.
            Returns:
                Merged iterable.
            """
            def pick(left: typing.Optional[uuid.UUID], right: typing.Optional[uuid.UUID]) -> typing.Optional[uuid.UUID]:
                """Pick the non-null value from the two arguments.
                Args:
                    left: Left input argument to pick from.
                    right: Right input argument to pick from.
                Returns:
                    The non-null value of the two (if any).
                """
                assert not (left and right), 'Expecting at most one non-null value'
                return left if left else right
            return (pick(a, b) for a, b in itertools.zip_longest(value, element))
        # Getters that ended up as leaves feed nothing downstream -- drop them.
        stubs = {s for s in (self._index[n] for n in self._linkage.leaves) if isinstance(s, system.Getter)}
        for instruction, keys in self._index.instructions:
            if instruction in stubs:
                LOGGER.debug('Pruning stub getter %s', instruction)
                continue
            try:
                arguments = tuple(self._index[a] for a in functools.reduce(merge, (self._linkage[k] for k in keys)))
            except KeyError as err:
                raise _exception.AssemblyError(f'Argument mismatch for instruction {instruction}') from err
            yield target.Symbol(instruction, arguments)
    def add(self, node: 'flow.Worker') -> None:
        """Populate the symbol table to implement the logical flow of given node.
        Args:
            node: Node to be added - compiled into symbols.
        """
        assert node.uid not in self._index, f'Node collision ({node})'
        assert isinstance(node, atomic.Worker), f'Not a worker node ({node})'
        LOGGER.debug('Adding node %s into the symbol table', node)
        functor = user.Apply().functor(node.builder)
        aliases = [node.uid]
        if node.stateful:
            state = node.gid
            persistent = self._assets and state in self._assets
            if persistent and state not in self._index:
                self._index.set(system.Loader(self._assets, state), state)
            if node.trained:
                functor = user.Train().functor(node.builder)
                aliases.append(state)
                if persistent:
                    # Newly trained state gets dumped and committed alongside
                    # the other persistent states of this run.
                    if not self._committer:
                        self._committer = self._index.set(system.Committer(self._assets))
                    dumper = self._index.set(system.Dumper(self._assets))
                    self._linkage.insert(dumper, node.uid)
                    self._linkage.insert(self._committer, dumper, self._assets.offset(state))
                    state = self._index.reset(state)  # re-register the loader under its own id
            if persistent or node.derived:
                functor = functor.preset_state()
                self._linkage.prepend(node.uid, state)
        for key in aliases:
            self._index.set(functor, key)
        if not node.trained:
            self._linkage.update(node, lambda index: self._index.set(system.Getter(index)))
    def visit_node(self, node: 'flow.Worker') -> None:
        """Visitor entrypoint.
        Args:
            node: Node to be visited.
        """
        self.add(node)
def compile(  # pylint: disable=redefined-builtin
    segment: 'flow.Segment', assets: typing.Optional['asset.State'] = None
) -> typing.Collection['flow.Symbol']:
    """Compile a flow topology segment into a portable runtime symbol table.

    The segment is walked with a :class:`Table` visitor, which emits the user
    instructions for every worker node plus any required system instructions
    (state loaders/dumpers, the committer, output getters).

    Args:
        segment: Flow topology segment to generate the symbol table for.
        assets: Runtime state asset accessors for all the involved persistent
            workers.

    Returns:
        The portable runtime symbol table.
    """
    visitor = Table(assets)
    segment.accept(visitor)
    return tuple(visitor)
| [
"antonymayi@yahoo.com"
] | antonymayi@yahoo.com |
de671b4fca33d2cc397e7ef03128b134a2feba3a | 22a8d887247f5620221f17906f26eba529aac4bb | /TwoTwo/eLEVEN.py | 4a5b0baab0caa5106099665ff752b371390f06d1 | [] | no_license | mubaskid/new | 2255c01a189068661c47bb4ce77e7d505d1ce23b | 56996963b5ed069907848adc66bfdd27cc24539e | refs/heads/master | 2023-06-09T09:46:43.744027 | 2021-04-21T13:47:06 | 2021-04-21T13:47:06 | 356,242,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | def sum_double(a, b):
if a is b:
return 2 * (a + b)
else:
return a+b
| [
"77334849+mubaskid@users.noreply.github.com"
] | 77334849+mubaskid@users.noreply.github.com |
db1cdee845b4b624fb660b2aea764c2af6c1f65a | e982ad81d18e3a983756b4c90311b007b9d5e276 | /pyspec/wxui/project.py | 62378ccb4286bfa0c3941582a2dfb6ee91771e7f | [
"MIT"
] | permissive | jyotijaya/pyspec | 2ca4428c3c9924154f7467edbdc1d8fddd59a817 | ae7a4de39beb3cf2e0838b6c3a9ef73d082445eb | refs/heads/master | 2022-12-27T20:42:15.818388 | 2020-10-01T11:50:19 | 2020-10-01T11:50:19 | 300,260,536 | 0 | 0 | NOASSERTION | 2020-10-01T11:49:40 | 2020-10-01T11:49:39 | null | UTF-8 | Python | false | false | 5,504 | py | # -*- coding: ascii -*-
__pyspec = 1
import os
import time
import ConfigParser
import pyspec.util
import pyspec.project
class WxPySpecProject(pyspec.project.PySpecProject):
    """Project file extended with the wx GUI's boolean run/notify options."""

    # The four GUI options persisted in the [Config] section, in file order.
    _BOOL_OPTIONS = ("auto_run", "auto_reload", "fail_activate", "success_activate")

    def __init__(self, file_or_filename=None, last_used_time=None):
        super(WxPySpecProject, self).__init__(file_or_filename, does_read=False)
        if last_used_time is None:
            last_used_time = time.time()
        self.last_used_time = last_used_time

    def _clear_all(self):
        super(WxPySpecProject, self)._clear_all()
        for option in self._BOOL_OPTIONS:
            setattr(self, option, False)

    def _read_template(self, parser):
        for option in self._BOOL_OPTIONS:
            self._read_bool_option(parser, "Config", option)

    def _save_template(self, parser):
        for option in self._BOOL_OPTIONS:
            parser.set("Config", option, str(getattr(self, option)))
class WxPySpecProjectManager(object):
    """Manages the most-recently-used project files for the wx GUI.

    ``self.projects`` is kept sorted by last-use time; its last element is
    always the *current* project.  With ``test_data`` supplied the manager
    runs in test mode: nothing is read from or written to the user's home
    directory and "now" is injected via ``current_time_for_test``.
    """
    def __init__(self, test_data=None):
        self.projects = []
        self.dirty_flag = False
        if test_data is None:
            filepath = pyspec.util.home_path("pyspec.conf")
            if os.path.exists(filepath):
                self._read_setting_file(file(filepath))
                self._current().read()
            else:
                self.add_new_project()
            self.test_mode = False
        else:
            self._read_setting_file(test_data)
            self.test_mode = True
        self.current_time_for_test = None

    def _read_setting_file(self, fileobj):
        """Parse "path=timestamp" lines into projects (most recent last)."""
        for line in fileobj.readlines():
            if line.strip() == "":
                continue
            filename, last_use = line.split("=")
            # BUG FIX: keep the timestamp numeric -- it is used as a sort key
            # and written back with "%d" in _update_config_files(); int() also
            # tolerates the trailing newline.
            self.projects.append(WxPySpecProject(filename, int(last_use)))
        if len(self.projects) == 0:
            self.add_new_project()
        else:
            self.projects.sort(key=lambda o: o.last_used_time)

    def _update_config_files(self):
        """Trim the history to the five most recent projects and persist it."""
        if len(self.projects) > 5:
            self.projects.sort(key=lambda o: o.last_used_time)
            self.projects = self.projects[-5:]
        if self.test_mode:
            return
        user_setting = file(pyspec.util.home_path("pyspec.conf"), "w")
        for option in self.projects:
            user_setting.write("%s=%d\n" % (option.get_filepath(),
                                            option.last_used_time))
        user_setting.close()

    def _current(self):
        """Return the current (most recently used) project."""
        return self.projects[-1]

    def _current_time(self):
        # In test mode "now" is injected via current_time_for_test.
        if not self.test_mode:
            return time.time()
        return self.current_time_for_test

    def add_new_project(self):
        """Append a fresh, unsaved project and make it current."""
        self.projects.append(WxPySpecProject())

    def open(self, filepath_or_file):
        """Open a project from a path (tracked in history) or a file object."""
        if not isinstance(filepath_or_file, basestring):
            self._current().read(filepath_or_file)
            return
        is_new = True
        for project in self.projects:
            if filepath_or_file == project.get_filepath():
                is_new = False
                project.last_used_time = self._current_time()
        if is_new:
            self.projects.append(WxPySpecProject(filepath_or_file,
                                                 self._current_time()))
        self._update_config_files()
        if not self.test_mode:
            self._current().set_filepath(filepath_or_file)
            self._current().read()

    def save(self, test_data=None):
        # NOTE: test_data is unused but retained for interface compatibility.
        target_project = self.projects[-1]
        if not self.test_mode:
            target_project.save()
        self.dirty_flag = False

    def save_as(self, filepath):
        """Save the current project under a new path and refresh the history."""
        target_project = self.projects[-1]
        target_project.last_used_time = self._current_time()
        if not self.test_mode:
            target_project.save(filepath)
            self._update_config_files()
        else:
            target_project.set_filepath(filepath)
        self.dirty_flag = False

    def can_save(self):
        return not self.is_default_file()

    def should_save(self):
        return self.dirty_flag

    def set_dirty_flag(self):
        self.dirty_flag = True

    def is_default_file(self):
        return self._current().is_default

    def is_auto_run(self):
        return self._current().auto_run

    def is_auto_reload(self):
        return self._current().auto_reload

    def is_fail_activate(self):
        return self._current().fail_activate

    def is_success_activate(self):
        return self._current().success_activate

    def get_function_hook(self):
        return self._current().function_hook

    def display_filename(self):
        """Return the title-bar representation of the current project."""
        if self._current().is_default:
            return "*new"
        if self.should_save():
            return "* %s *" % self.get_filepath()
        # BUG FIX: removed a second, unreachable duplicate of this return.
        return self.get_filepath()

    def get_filepath(self):
        return self._current().get_filepath()

    def last_used_time(self):
        return self._current().last_used_time

    def set_modules(self, specs):
        self._current().reset_specs(specs)

    def get_modules(self):
        return sorted(self._current().specs.values())
| [
"yoshiki@shibu.jp"
] | yoshiki@shibu.jp |
33ac8635e4bcb4c809545df017ca374fe921575c | c4209246ef01b1276b443bf7ce887d0b30b242dc | /test.py | 2ddc2ea00909e1ebff009ba662fb851d05232ab1 | [] | no_license | avmangu/SURF-2017 | 55237d48e92647d3c7ccce3f7911d52218a85e85 | 6f25798e96fdae9006285b99f76861fc2196f2ce | refs/heads/master | 2020-03-21T04:14:18.866034 | 2018-06-21T00:17:39 | 2018-06-21T00:17:39 | 138,098,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
# Crunch Input Data to Create Scanner Positions + Plot Coordinates
def liveGather(center, fine_range, fine_n, course_range, course_n):
    # Build coarse/fine scanner positions around `center` and wait for the
    # hardware to reach each one before recording it.
    # NOTE(review): `course`, `fine`, `course_2`, `fine_2`, `positive`,
    # `negative`, `getCurrent` and `stepConverter` are not defined anywhere in
    # this file -- presumably globals from a missing import; confirm.
    # STEPS
    fine_step = (fine_range - center) / fine_n
    course_step = (course_range - center) / course_n
    # NEGATIVE RANGE
    fine_neg_range = center - (fine_range - center)
    course_neg_range = center - (course_range - center)
    # POSITIVE POSITIONS
    pos = course_range
    while(pos > fine_range):
        pos = round(pos, 3)
        course.append(pos)
        pos -= course_step
    pos = fine_range
    fine.append(pos)
    while(pos > center):
        pos -= fine_step
        pos = round(pos, 3)
        fine.append(pos)
    fine[-1] = center
    # NEGATIVE POSITIONS
    neg = course_neg_range
    while(fine_neg_range > neg):
        neg = round(neg, 3)
        course_2.append(neg)
        neg += course_step
    neg = fine_neg_range
    neg = round(neg, 3)
    fine_2.append(neg)
    while(center > neg):
        neg += fine_step
        neg = round(neg, 3)
        fine_2.append(neg)
    fine_2[-1] = center
    # POSITIVE LIST
    positive_list = course + fine
    positive_list.sort(reverse = True)
    # NOTE(review): these busy-wait loops spin forever if the scanner never
    # reports the expected step count.
    for i in range(len(positive_list)):
        while True:
            if(getCurrent("steps") == stepConverter(positive_list[i])):
                positive.append(positive_list[i])
                break
    # NEGATIVE LIST
    negative_list = course_2 + fine_2
    negative_list.sort(reverse = False)
    for j in range(len(negative_list)):
        while True:
            # NOTE(review): compares against positive_list[i] here -- looks
            # like a copy/paste bug; negative_list[j] was probably intended.
            if(getCurrent("steps") == stepConverter(positive_list[i])):
                negative.append(negative_list[j])
                break
def animate(i):
    # FuncAnimation frame callback: regather positions and redraw the plot.
    # NOTE(review): `center`, `fine_range`, `fine_n`, `course_range`,
    # `course_n`, `ycoords`, `xcoords`, `positive`, `negative` and `delay`
    # are never defined in this file; `delay.text()` suggests a Qt widget.
    liveGather(center, fine_range, fine_n, course_range, course_n)
    # CREATING Y-COORDINATES
    for a in range(2):
        ycoords.append(center)
    for i in range(len(positive)):
        ycoords.append(positive[i])
        ycoords.append(positive[i])
    for j in range(len(negative)):
        ycoords.append(negative[j])
        ycoords.append(negative[j])
    # CREATING X-COORDINATES
    xcoords.append(0)
    time_delay = float(delay.text())
    # NOTE(review): len(ycoords) / 2 is a float on Python 3, so range() would
    # raise TypeError -- this code appears to target Python 2; confirm.
    for x in range((len(ycoords) / 2) + 1):
        if(x > 0):
            if((len(ycoords) - len(xcoords)) == 1):
                xcoords.append(time_delay * x)
                break
            xcoords.append(time_delay * x)
        xcoords.append(time_delay * x)
    ax1.clear()
    ax1.plot(xcoords, ycoords)
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
| [
"you@example.com"
] | you@example.com |
d93be9e28631574f08d987345e6a4340eb40a6d6 | 6ba506c39a7e32389b2933e21634c6fc79f16c3b | /tensorflow_graphics/rendering/opengl/rasterization_backend.py | 26cec9b2e747aeaf5921c60b0d0b5b0c513baf6a | [
"Apache-2.0"
] | permissive | kennycaiguo/graphics | 4edac0792b4494efdfdafceb500a24bb59b892fe | 4ffdf25ce7c3a27814a930e285c941752cc128e5 | refs/heads/master | 2023-06-12T18:11:01.858950 | 2021-07-05T13:44:40 | 2021-07-05T13:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,323 | py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OpenGL rasterization backend for TF Graphics."""
import tensorflow as tf
from tensorflow_graphics.rendering import framebuffer as fb
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import shape
# pylint: disable=g-import-not-at-top
try:
from tensorflow_graphics.rendering.opengl import gen_rasterizer_op as render_ops
except ImportError:
import os
dir_path = os.path.dirname(os.path.abspath(__file__))
render_ops = tf.load_op_library(os.path.join(dir_path, "rasterizer_op.so"))
# pylint: enable=g-import-not-at-top
def _dim_value(dim):
  """Return the static value of a (possibly unknown) dimension, or 1 for None."""
  if dim is None:
    return 1
  return tf.compat.dimension_value(dim)
# Empty vertex shader; all the work happens in the geometry shader.
vertex_shader = """
#version 430
void main() { }
"""
# Geometry shader that projects the vertices of visible triangles onto the image
# plane.
geometry_shader = """
#version 430
uniform mat4 view_projection_matrix;
layout(points) in;
layout(triangle_strip, max_vertices=3) out;
out layout(location = 0) vec2 barycentric_coordinates;
out layout(location = 1) float triangle_index;
layout(binding=0) buffer triangular_mesh { float mesh_buffer[]; };
vec3 get_vertex_position(int vertex_index) {
// Triangles are packed as 3 consecuitve vertices, each with 3 coordinates.
int offset = gl_PrimitiveIDIn * 9 + vertex_index * 3;
return vec3(mesh_buffer[offset], mesh_buffer[offset + 1],
mesh_buffer[offset + 2]);
}
void main() {
vec3 positions[3] = {get_vertex_position(0), get_vertex_position(1),
get_vertex_position(2)};
vec4 projected_vertices[3] = {
view_projection_matrix * vec4(positions[0], 1.0),
view_projection_matrix * vec4(positions[1], 1.0),
view_projection_matrix * vec4(positions[2], 1.0)};
for (int i = 0; i < 3; ++i) {
// gl_Position is a pre-defined size 4 output variable.
gl_Position = projected_vertices[i];
barycentric_coordinates = vec2(i==0 ? 1.0 : 0.0, i==1 ? 1.0 : 0.0);
triangle_index = gl_PrimitiveIDIn;
EmitVertex();
}
EndPrimitive();
}
"""
# Fragment shader that packs barycentric coordinates, and triangle index.
fragment_shader = """
#version 430
in layout(location = 0) vec2 barycentric_coordinates;
in layout(location = 1) float triangle_index;
out vec4 output_color;
void main() {
output_color = vec4(round(triangle_index), barycentric_coordinates, 1.0);
}
"""
def rasterize(vertices,
              triangles,
              view_projection_matrices,
              image_size,
              enable_cull_face,
              num_layers,
              name="rasterization_backend_rasterize"):
  """Rasterizes the scene.
  This rasterizer estimates which triangle is associated with each pixel using
  OpenGL.
  Note:
    In the following, A1 to An are optional batch dimensions which must be
    broadcast compatible for inputs `vertices` and `view_projection_matrices`.
  Args:
    vertices: A tensor of shape `[batch, num_vertices, 3]` containing batches
      vertices, each defined by a 3D point.
    triangles: A tensor of shape `[num_triangles, 3]` each associated with 3
      vertices from `scene_vertices`
    view_projection_matrices: A tensor of shape `[batch, 4, 4]` containing
      batches of view projection matrices
    image_size: An tuple of integers (width, height) containing the dimensions
      in pixels of the rasterized image.
    enable_cull_face: A boolean, which will enable BACK face culling when True
      and no face culling when False. Default is True.
    num_layers: Number of depth layers to render. Not supported by current
      backend yet, but exists for interface compatibility.
    name: A name for this op. Defaults to "rasterization_backend_rasterize".
  Returns:
    A Framebuffer containing the rasterized values: barycentrics, triangle_id,
    foreground_mask, vertex_ids. Returned Tensors have shape
    [batch, num_layers, height, width, channels]
    Note: triangle_id contains the triangle id value for each pixel in the
    output image. For pixels within the mesh, this is the integer value in the
    range [0, num_vertices] from triangles. For vertices outside the mesh this
    is 0; 0 can either indicate belonging to triangle 0, or being outside the
    mesh. This ensures all returned triangle ids will validly index into the
    vertex array, enabling the use of tf.gather with indices from this tensor.
    The barycentric coordinates can be used to determine pixel validity instead.
    See framebuffer.py for a description of the Framebuffer fields.
  """
  with tf.name_scope(name):
    if num_layers != 1:
      raise ValueError("OpenGL rasterizer only supports single layer.")
    vertices = tf.convert_to_tensor(value=vertices)
    triangles = tf.convert_to_tensor(value=triangles)
    view_projection_matrices = tf.convert_to_tensor(
        value=view_projection_matrices)
    shape.check_static(
        tensor=vertices,
        tensor_name="vertices",
        has_rank=3,
        has_dim_equals=((-1, 3)))
    shape.check_static(
        tensor=triangles,
        tensor_name="triangles",
        has_rank=2,
        has_dim_equals=((-1, 3)))
    shape.check_static(
        tensor=view_projection_matrices,
        tensor_name="view_projection_matrices",
        has_rank=3,
        has_dim_equals=((-1, 4), (-2, 4)))
    shape.compare_batch_dimensions(
        tensors=(vertices, view_projection_matrices),
        tensor_names=("vertices", "view_projection_matrices"),
        last_axes=(-3, -3),
        broadcast_compatible=True)
    # Expand triangle indices into per-triangle vertex coordinates.
    geometry = tf.gather(vertices, triangles, axis=-2)
    # Extract batch size in order to make sure it is preserved after `gather`
    # operation.
    batch_size = _dim_value(vertices.shape[0])
    rasterized = render_ops.rasterize(
        num_points=geometry.shape[-3],
        alpha_clear=0.0,
        enable_cull_face=enable_cull_face,
        variable_names=("view_projection_matrix", "triangular_mesh"),
        variable_kinds=("mat", "buffer"),
        variable_values=(view_projection_matrices,
                         tf.reshape(geometry, shape=[batch_size, -1])),
        output_resolution=image_size,
        vertex_shader=vertex_shader,
        geometry_shader=geometry_shader,
        fragment_shader=fragment_shader)
    # Channel packing follows the fragment shader above:
    # [triangle_id, bary_u, bary_v, mask].
    triangle_index = tf.cast(rasterized[..., 0], tf.int32)
    # Slicing of the tensor will result in all batch dimensions being
    # `None` for tensorflow graph mode, therefore we have to fix it in order to
    # have explicit shape.
    width, height = image_size
    triangle_index = tf.reshape(triangle_index, [batch_size, height, width, 1])
    barycentric_coordinates = rasterized[..., 1:3]
    # Recover the third barycentric coordinate (they sum to one).
    barycentric_coordinates = tf.concat(
        (barycentric_coordinates, 1.0 - barycentric_coordinates[..., 0:1] -
         barycentric_coordinates[..., 1:2]),
        axis=-1)
    mask = rasterized[..., 3]
    mask = tf.reshape(mask, [batch_size, height, width, 1])
    # Zero out the barycentrics on background pixels.
    barycentric_coordinates = mask * barycentric_coordinates
    vertex_ids = tf.gather(triangles, triangle_index[..., 0], batch_dims=0)
    # Stop gradient for tensors coming out of custom op in order to avoid
    # confusing Tensorflow that they are differentiable.
    barycentric_coordinates = tf.stop_gradient(barycentric_coordinates)
    mask = tf.stop_gradient(mask)
    return fb.Framebuffer(
        foreground_mask=mask,
        triangle_id=triangle_index,
        vertex_ids=vertex_ids,
        barycentrics=fb.RasterizedAttribute(
            value=barycentric_coordinates, d_dx=None, d_dy=None))
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
fbb9b24bdcac31d3b67697b3d22d73c4739a0aa7 | 482f69590cc30c5cf0057ecf6d7ec12e9092e2e1 | /backend/Date_Ocr/manage.py | 2ffea550449aff383cf89893460e73de391bed3d | [
"MIT"
] | permissive | namanMANU/date-ocr | d16c62848b90718276bfef0b3b59707491a7f20e | ff2966b23964fe07b19638cb8afe24749144bd97 | refs/heads/master | 2022-12-05T02:59:39.220858 | 2020-04-06T01:40:32 | 2020-04-06T01:40:32 | 253,369,132 | 0 | 0 | MIT | 2022-11-22T05:00:00 | 2020-04-06T01:37:08 | Python | UTF-8 | Python | false | false | 628 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line utility for administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Date_Ocr.settings')
    execute = _load_django_cli()
    execute(sys.argv)


def _load_django_cli():
    """Import and return Django's CLI entry point, failing with a helpful hint."""
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    return execute_from_command_line


if __name__ == '__main__':
    main()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
f478aeaf0498b9d894f8fa4b13a10a86c87251ff | ebcc57cbd7bc4c951fe3cf9826efc2d03d1e47e8 | /educative/05 Cyclic Sort/01 Cyclic Sort (easy).py | db806e81a23f57c1763200df5dc4238ef520c8ee | [] | no_license | Vahid-Esmaeelzadeh/CTCI-Python | 17a672e95f1d886f4fb66239a4aa22a87f38382a | 867360ab13dd63d24d6f3e45b5ac223755942b54 | refs/heads/master | 2022-10-26T16:43:54.939188 | 2020-06-11T21:42:15 | 2020-06-11T21:42:15 | 190,065,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | '''
Cyclic Sort
We are given an array containing ‘n’ objects. Each object, when created, was assigned a unique number from 1 to ‘n’
based on their creation sequence. This means that the object with sequence number ‘3’ was created just before the
object with sequence number ‘4’.
Write a function to sort the objects in-place on their creation sequence number in O(n) and without any extra space.
For simplicity, let’s assume we are passed an integer array containing only the sequence numbers, though each number
is actually an object.
'''
def cyclic_sort(nums):
    """Sort, in place, a list holding each value 1..n exactly once.

    Value ``v`` belongs at index ``v - 1``; each out-of-place value is
    swapped into its home slot, giving O(n) time and O(1) extra space.
    The (mutated) input list is returned for convenience.
    """
    idx = 0
    while idx < len(nums):
        home = nums[idx] - 1
        if nums[idx] == nums[home]:
            # Already in position; advance.
            idx += 1
        else:
            nums[idx], nums[home] = nums[home], nums[idx]
    return nums
def main():
    """Demo entry point: sort and print two sample lists."""
    for sample in ([3, 1, 5, 4, 2], [2, 6, 4, 3, 1, 5]):
        print(cyclic_sort(sample))


main()
| [
"v.esmaeelzadeh@gmail.com"
] | v.esmaeelzadeh@gmail.com |
f2a61a0f7f387402f930c3178fe8175461504e36 | 89841a2b522b7b1ab7965560f62b4b401b2d0a4d | /text to speech python/text_to_speech.py | cfecef0e9236cc11bba0cba88bd60dbf68b2212e | [] | no_license | sidd5sci/python-basics | 14d621d52d3219943e2b0136c610dd769cc36a29 | fea620141292cb6beee782cddb5a7d4eeb067e9a | refs/heads/master | 2021-01-20T00:22:29.496330 | 2017-04-22T17:03:00 | 2017-04-22T17:03:00 | 89,123,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import pyttsx
# Initialize the pyttsx text-to-speech engine, queue one utterance, and
# speak it.  runAndWait() blocks until the queued speech has finished.
engine = pyttsx.init()
engine.say('Only one species is there Both male and female are presennt Red ones are the male and black ones are the femaleReproduction occur when male and female collide and having health > 60')
engine.runAndWait()
| [
"sidd5sci@gmail.com"
] | sidd5sci@gmail.com |
26acd2756fd155e15a131af4bb0fd06493c314ab | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_116/ch22_2020_03_04_13_01_47_611677.py | 59962a24516fb71ebe20afd1ff6b9dfe862676ef | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | def ft(t,n):
z=(n*(t*360)*(0.00694444))
return z
t=int(input('tempo em anos'))
n=int(input('cigarros por dia'))
print(int(ft(t,n)),('anos perdidos'))
| [
"you@example.com"
] | you@example.com |
b2dcc21f07cc5c003b4b60d9f1824b416014563d | a8547f73463eef517b98d1085430732f442c856e | /numpy/f2py/tests/test_return_complex.py | c201f4955dbec04b51e030f3be61506afcc505cd | [] | no_license | EnjoyLifeFund/macHighSierra-py36-pkgs | 63aece1b692225ee2fbb865200279d7ef88a1eca | 5668b5785296b314ea1321057420bcd077dba9ea | refs/heads/master | 2021-01-23T19:13:04.707152 | 2017-12-25T17:41:30 | 2017-12-25T17:41:30 | 102,808,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | ../../../../../../Cellar/numpy/1.13.3/lib/python3.6/site-packages/numpy/f2py/tests/test_return_complex.py | [
"Raliclo@gmail.com"
] | Raliclo@gmail.com |
6d08d19c6fffa0eb9daccf141d2adf8d73445373 | 2dbadf8d7c26b3dda69328229b60df160b69f917 | /evaluate_densedepth_nyuv2_labeled.py | 6bbc1409c281972b2b29e944af1ad04946ad7941 | [] | no_license | computational-imaging/spad_single | a17c31d0564a16f08f4768dcc27c064272a5f70d | 54e18e26a6f3c33837da032063e8cf9cc287569e | refs/heads/master | 2022-11-18T08:32:37.513981 | 2020-07-19T04:44:56 | 2020-07-19T04:44:56 | 152,368,443 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,503 | py | #!/usr/bin/env python3
import os
import numpy as np
import torch
from utils.train_utils import init_randomness
from collections import defaultdict
import json
from models.core.checkpoint import load_checkpoint, safe_makedir
from models.data.data_utils.transforms import AddDepthMask
from utils.eval_utils import evaluate_model_on_dataset, evaluate_model_on_data_entry
from models import make_model
from sacred import Experiment
from sacred.observers import FileStorageObserver
# Dataset
from models.data.nyuv2_labeled_dataset import nyuv2_labeled_ingredient, load_data
ex = Experiment('densedepth_nyuv2_labeled', ingredients=[nyuv2_labeled_ingredient])
# Tensorboardx
# writer = SummaryWriter()
# NOTE: sacred config scope — every local variable assigned below is captured
# into the experiment configuration and can be overridden on the command line.
@ex.config
def cfg(data_config):
    model_config = { # Load pretrained model for testing
        "model_name": "DenseDepth",
        "model_params": {
            "existing": os.path.join("models", "nyu.h5"),
        },
        "model_state_dict_fn": None
    }
    ckpt_file = None # Keep as None
    save_outputs = True
    seed = 95290421 # changing seed does not impact evaluation
    small_run = 0
    dataset_type = "test"
    entry = None
    # print(data_config.keys())
    # Outputs land in results/<data_name>/<dataset_type>_<small_run>/<model_name>.
    output_dir = os.path.join("results",
                              data_config["data_name"], # e.g. nyu_depth_v2
                              "{}_{}".format(dataset_type, small_run),
                              model_config["model_name"]) # e.g. DORN_nyu_nohints
    safe_makedir(output_dir)
    ex.observers.append(FileStorageObserver.create(os.path.join(output_dir, "runs")))
    cuda_device = "0" # The gpu index to run on. Should be a string
    os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
    # print("after: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    # If a checkpoint is supplied, its saved model settings replace model_config.
    if ckpt_file is not None:
        model_update, _, _ = load_checkpoint(ckpt_file)
        model_config.update(model_update)
        del model_update, _ # So sacred doesn't collect them.
@ex.automain
def main(model_config,
         save_outputs,
         output_dir,
         data_config,
         seed,
         small_run,
         dataset_type,
         entry):
    """Evaluate a pretrained DenseDepth model on the NYUv2 labeled dataset.

    All arguments are injected by sacred from the config scope above.
    Evaluates either the whole train/test split or, when `entry` is given,
    a single dataset entry.
    """
    # Load the model
    model = make_model(**model_config)
    # model.sid_obj.to(device)
    from tensorboardX import SummaryWriter
    from datetime import datetime
    model.writer = SummaryWriter(log_dir=os.path.join("runs",
                                                      datetime.now().strftime('%b%d'),
                                                      datetime.now().strftime('%H-%M-%S_') + \
                                                      "densedepth_nohints"))
    # Load the data
    train, test = load_data(dorn_mode=False)
    dataset = train if dataset_type == "train" else test
    # Per-sample evaluation callback; the mask is all-ones (no missing depth).
    # NOTE(review): `device` is accepted but unused by this model — confirm
    # it is required by the evaluate_model_on_* callback signature.
    eval_fn = lambda input_, device: model.evaluate(input_["rgb"],
                                                    input_["crop"][0,:],
                                                    input_["depth_cropped"],
                                                    torch.ones_like(input_["depth_cropped"]))
    init_randomness(seed)
    if entry is None:
        print("Evaluating the model on {}.".format(data_config["data_name"]))
        evaluate_model_on_dataset(eval_fn, dataset, small_run, None, save_outputs, output_dir)
    else:
        print("Evaluating {}".format(entry))
        evaluate_model_on_data_entry(eval_fn, dataset, entry, None, save_outputs, output_dir)
| [
"nishimuramarky@yahoo.com"
] | nishimuramarky@yahoo.com |
420b8215ab7148d92a6089821e81d5a6120804d7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02829/s673828179.py | 69644809e039e9a3784c22e8de1968d8b329c1a6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | #!/usr/bin/python3
import sys
def input():
return sys.stdin.readline().rstrip('\n')
#S = input()
#A1,A2,A3 = list(map(int,input().split()))
A = int(input())
B = int(input())
C = [1,2,3]
C.remove(A)
C.remove(B)
print(C[0])
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1b5f58b99c29bc28439c90c1f48251d330858219 | ebdc8475416892f79e4eeee354f6e2a909502565 | /generator/dnd.py | fd6314fe5a27e2308b85fbeddcb206a2884fccd4 | [] | no_license | jamesorendorff/ears-handbook | 810e10a5f1b48c206a6302701bef2efcfabb5c9f | 7b76373adf6debc4e4ec34ef49438935d3c87010 | refs/heads/master | 2021-06-14T19:32:14.915706 | 2021-03-23T20:13:04 | 2021-03-23T20:13:04 | 165,413,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | """
barbarian, bard, cleric, druid, fighter, monk, paladin, ranger, rogue, sorcerer, warlock, wizard
Ideal examples:
The party teams up with a naive gnomish cobbler to face a ruthless drider and her pet violet fungi.
An operatic dwarven bard with sideburns saves a desert caravan by finding the resonant frequency of an attacking glass elemental.
The palace chef has baked the same magical cake on 99 consecutive days, and is on the verge of creating a delicious *evercake*. When a shadowy figure steals the cookbook, the party has only twelve hours to crack the case and save the cake.
A team of dwarvish miners is trapped when a tunnel collapses. The party must fight through hook horrors and a black pudding to rescue them, then confront the mysterious cause of the collapse.
A harpy that has learned to cast *mage hand* wreaks gleeful havoc as the party tries to solve a supernatural murder.
Three gnomes in plate armor pretend to be an ogre to shake down a town for badly needed medicine. (@detarame)
"""
productions = {
'pc_race_plural': [
'halflings',
'dwarves',
'elves',
'gnomes',
],
'monsters': [
'hook horrors',
],
'a_monster': [
'an ogre',
'a troll',
'a harpy',
'a black pudding',
],
'people': [
'three ${pc_race_plural} disguised as ${a_monster}',
'some dwarvish miners',
],
'vp': [
'are trapped when a tunnel collapses',
'must fight through ${monsters} and ${a_monster}',
'try to solve a supernatural murder',
],
'scenario': [
'${people} ${vp}.'
],
}
| [
"jason.orendorff@gmail.com"
] | jason.orendorff@gmail.com |
c772e7330c71059fd7c4a47309c08d0c549056fb | 2af6a5c2d33e2046a1d25ae9dd66d349d3833940 | /res/scripts/common/lrucache.py | a11c1212522351e66f34f68e2ae40dff2282bf90 | [] | no_license | webiumsk/WOT-0.9.12-CT | e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2 | 2506e34bd6634ad500b6501f4ed4f04af3f43fa0 | refs/heads/master | 2021-01-10T01:38:38.080814 | 2015-11-11T00:08:04 | 2015-11-11T00:08:04 | 45,803,240 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 1,099 | py | # 2015.11.10 21:31:34 Střední Evropa (běžný čas)
# Embedded file name: scripts/common/LRUCache.py
import collections
class LRUCache(object):
    """Least-recently-used cache with a fixed capacity.

    Backed by an OrderedDict kept in access order (least recently used
    first), so eviction pops from the front.  Fixes decompiler artifacts
    in the original: an unreachable trailing ``return None`` in ``get``
    and a pop/reinsert where ``move_to_end`` suffices.
    """

    def __init__(self, limit):
        # Maps key -> value, ordered from least to most recently used.
        self.__cache = collections.OrderedDict()
        self.__limit = limit

    def get(self, key):
        """Return the value for `key` and mark it most recently used.

        Returns None when the key is absent.
        """
        try:
            self.__cache.move_to_end(key)
        except KeyError:
            return None
        return self.__cache[key]

    def peek(self, key):
        """Return the value for `key` without refreshing its recency."""
        return self.__cache.get(key, None)

    def set(self, key, value):
        """Insert or update `key`, evicting the LRU entry when at capacity."""
        try:
            self.__cache.pop(key)
        except KeyError:
            # New key: make room first if the cache is full.
            if len(self.__cache) >= self.__limit:
                self.__cache.popitem(last=False)
        self.__cache[key] = value

    def pop(self, key):
        """Remove and return `key`'s value, or None when absent."""
        return self.__cache.pop(key, None)

    def clear(self):
        """Drop every cached entry."""
        self.__cache.clear()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\common\lrucache.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:31:34 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
ff83a27884448a170a0eb59ab57a4f65255e150b | 9fb1c85a6d39c08e2a3cc235335bc482ad909b71 | /prowler/providers/aws/services/secretsmanager/secretsmanager_service.py | c11455f11f7372738a8070a3f1b5cd27b350d597 | [
"Apache-2.0"
] | permissive | muharihar/prowler | 06dbdeaa0696dd65d72c33ff3c9f957b97f83d7a | 25c9bc07b219cc02004cc0b84adcfdcf18d5ad2b | refs/heads/master | 2023-02-18T01:26:54.161003 | 2023-02-10T11:38:13 | 2023-02-10T11:38:13 | 238,623,868 | 0 | 0 | null | 2020-02-06T06:36:36 | 2020-02-06T06:36:35 | null | UTF-8 | Python | false | false | 2,331 | py | import threading
from pydantic import BaseModel
from prowler.lib.logger import logger
from prowler.lib.scan_filters.scan_filters import is_resource_filtered
from prowler.providers.aws.aws_provider import generate_regional_clients
################## SecretsManager
class SecretsManager:
    """Collects AWS Secrets Manager secrets across all audited regions.

    One thread per regional client lists the secrets; results are stored
    in ``self.secrets`` keyed by secret name.
    """
    def __init__(self, audit_info):
        self.service = "secretsmanager"
        self.session = audit_info.audit_session
        self.audited_account = audit_info.audited_account
        self.audit_resources = audit_info.audit_resources
        self.regional_clients = generate_regional_clients(self.service, audit_info)
        # name -> Secret; populated concurrently by __list_secrets__.
        self.secrets = {}
        self.__threading_call__(self.__list_secrets__)
    def __get_session__(self):
        """Return the boto3 session used for auditing."""
        return self.session
    def __threading_call__(self, call):
        """Run `call(regional_client)` in one thread per region and wait for all."""
        threads = []
        for regional_client in self.regional_clients.values():
            threads.append(threading.Thread(target=call, args=(regional_client,)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
    def __list_secrets__(self, regional_client):
        """Paginate ListSecrets for one region and record matching secrets.

        Secrets are skipped when an audit-resource filter is set and the
        ARN does not match.  Errors are logged, not raised, so one failing
        region does not abort the scan.
        """
        logger.info("SecretsManager - Listing Secrets...")
        try:
            list_secrets_paginator = regional_client.get_paginator("list_secrets")
            for page in list_secrets_paginator.paginate():
                for secret in page["SecretList"]:
                    if not self.audit_resources or (
                        is_resource_filtered(secret["ARN"], self.audit_resources)
                    ):
                        self.secrets[secret["Name"]] = Secret(
                            arn=secret["ARN"],
                            name=secret["Name"],
                            region=regional_client.region,
                        )
                        # "RotationEnabled" is only present for rotated secrets.
                        if "RotationEnabled" in secret:
                            self.secrets[secret["Name"]].rotation_enabled = secret[
                                "RotationEnabled"
                            ]
        except Exception as error:
            logger.error(
                f"{regional_client.region} --"
                f" {error.__class__.__name__}[{error.__traceback__.tb_lineno}]:"
                f" {error}"
            )
class Secret(BaseModel):
    """Pydantic model for a single Secrets Manager secret."""
    arn: str
    name: str
    region: str
    # Set from the ListSecrets "RotationEnabled" flag when present.
    rotation_enabled: bool = False
| [
"noreply@github.com"
] | muharihar.noreply@github.com |
57e7fec722d44281cff37c91573e894580e27dd1 | 9d84138b3bc2c2b42a306643f0ea8c3fd1bcd09c | /0x22-primegame/0-prime_game.py | e0d1d19d4191d77f6d8f749d59729afde509a940 | [] | no_license | Beardocracy/holbertonschool-interview | d1d93181a04d050316790ca42dfc9760214e1e00 | eb4f0b8610709bbbdcba9fb30fe198674377dcac | refs/heads/main | 2023-07-15T15:28:05.326314 | 2021-08-25T20:40:34 | 2021-08-25T20:40:34 | 281,188,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | #!/usr/bin/python3
''' Module contains isWinner '''
def isWinner(x, nums):
    ''' Determine the overall winner of the Prime Game.

    Each round uses an upper bound n from nums: starting with Maria, the
    players alternately pick a prime <= n and remove it and its multiples
    from {1..n}; a player who cannot move loses.  Maria therefore wins a
    round exactly when the count of primes <= n is odd.

    The previous implementation hard-coded answers for x in {10, 1000};
    this computes the game properly with a sieve of Eratosthenes.

    Args:
        x: number of rounds (only the first x entries of nums are played).
        nums: upper bound n for each round.

    Returns:
        "Maria" or "Ben" for whoever wins more rounds, or None on a tie
        or invalid input.
    '''
    if x is None or nums is None or x < 1 or not nums:
        return None
    rounds = nums[:x]
    limit = max(rounds)
    # Sieve of Eratosthenes up to the largest round bound.
    sieve = [True] * (limit + 1)
    sieve[0] = False
    if limit >= 1:
        sieve[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if sieve[p]:
            for multiple in range(p * p, limit + 1, p):
                sieve[multiple] = False
    # prime_counts[n] = number of primes <= n (prefix sums of the sieve).
    prime_counts = [0] * (limit + 1)
    running = 0
    for n in range(limit + 1):
        if sieve[n]:
            running += 1
        prime_counts[n] = running
    maria_wins = sum(1 for n in rounds if prime_counts[n] % 2 == 1)
    ben_wins = len(rounds) - maria_wins
    if maria_wins > ben_wins:
        return "Maria"
    if ben_wins > maria_wins:
        return "Ben"
    return None
| [
"travisjbearden@gmail.com"
] | travisjbearden@gmail.com |
2d7f06444d415639c19d531dde10cea2421b50d3 | e59fe240f0359aa32c59b5e9f581db0bfdb315b8 | /galaxy-dist/eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-x86_64-ucs2.egg/EGG-INFO/scripts/mMK_bitset.py | 8a26870e4040b1f02e7e993675554ca6263f037e | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | subway/Galaxy-Distribution | dc269a0258471597d483687a0f1dd9e10bd47448 | d16d6f9b6a8b7f41a218c06539863c8ce4d5a73c | refs/heads/master | 2021-06-30T06:26:55.237251 | 2015-07-04T23:55:51 | 2015-07-04T23:55:51 | 15,899,275 | 1 | 2 | null | 2020-10-07T06:17:26 | 2014-01-14T10:47:28 | Groff | UTF-8 | Python | false | false | 4,827 | py | #!/afs/bx.psu.edu/project/pythons/linux-x86_64-ucs2/bin/python2.7
import sys
import bx.align.maf
import bx.bitset
from bx.bitset_builders import *
from itertools import *
from optparse import OptionParser
from rpy import *
def main():
    """Sliding-window McDonald-Kreitman test over a MAF alignment (Python 2).

    Builds per-chromosome bitsets of SNP and divergence positions inside
    ("AR") and outside ("nonAR") neutral intervals, then for each window
    computes an MK p-value (chi-square when all four counts are >= 6,
    Fisher's exact test otherwise) and prints/writes one row per window.
    """
    # Parse the command line
    parser = OptionParser(usage = "usage: %prog [options] maf_file snp_file neutral_file window_size step_size")
    parser.add_option("-o", "--outfile", help = "Specify file for output")
    parser.add_option("-s", "--species", type = "string", default = "panTro2")
    parser.add_option("-b", "--build", type = "string", default = "hg18")
    (options, args) = parser.parse_args()
    if len(args) != 5:
        parser.error("Incorrect number of arguments")
    else:
        maf_filename = args[0]
        snp_filename = args[1]
        neutral_filename = args[2]
        window_size = int(args[3])
        step_size = int(args[4])
    if options.outfile != None:
        out_file = open(options.outfile, 'w')
    #Generate snp and neutral bitsets
    AR_snp_bitsets = binned_bitsets_from_file(open(snp_filename))
    neutral_bitsets = binned_bitsets_from_file(open(neutral_filename))
    # Generate divergence bitset from maf file
    AR_div_bitsets = dict()
    chr_lens = dict()
    reader = bx.align.maf.Reader( open (maf_filename) )
    for block in reader:
        comp1 = block.get_component_by_src_start( options.build )
        comp2 = block.get_component_by_src_start( options.species )
        if comp1 is None or comp2 is None:
            continue
        # Chromosome, start, and stop of reference species alignment
        chr = comp1.src.split( '.' )[1]
        start = comp1.start
        end = comp1.end
        # Get or create bitset for this chromosome
        if chr in AR_div_bitsets:
            bitset = AR_div_bitsets[chr]
        else:
            bitset = AR_div_bitsets[chr] = bx.bitset.BinnedBitSet()
            chr_lens[chr] = comp1.get_src_size()
        # Iterate over text and set diverged bit.  Reference gaps do not
        # advance pos; a substitution only counts as divergence when the
        # position is not already a known SNP.
        pos = start
        for ch1, ch2 in izip( comp1.text.upper(), comp2.text.upper() ):
            if ch1 == '-': continue
            if ch2 == '-':
                pos += 1
                continue
            if ch1 != ch2 and not AR_snp_bitsets[chr][pos]:
                bitset.set( pos )
            pos += 1
    # Debugging Code
    # for chr in AR_div_bitsets:
    #     for pos in range(0, AR_div_bitsets[chr].size):
    #         if AR_div_bitsets[pos]:
    #             print >> sys.stderr, chr, pos, pos+1
    # Copy div and snp bitsets (the AR_* originals are masked in place below)
    nonAR_snp_bitsets = dict()
    for chr in AR_snp_bitsets:
        nonAR_snp_bitsets[chr] = bx.bitset.BinnedBitSet()
        nonAR_snp_bitsets[chr].ior(AR_snp_bitsets[chr])
    nonAR_div_bitsets = dict()
    for chr in AR_div_bitsets:
        nonAR_div_bitsets[chr] = bx.bitset.BinnedBitSet()
        nonAR_div_bitsets[chr].ior(AR_div_bitsets[chr])
    # Generates AR snps by intersecting with neutral intervals
    for chr in AR_snp_bitsets:
        AR_snp_bitsets[chr].iand(neutral_bitsets[chr])
    # Generates AR divs by intersecting with neutral intervals
    for chr in AR_div_bitsets:
        AR_div_bitsets[chr].iand(neutral_bitsets[chr])
    # Inverts the neutral intervals so now represents nonAR
    for chr in neutral_bitsets:
        neutral_bitsets[chr].invert()
    # Generates nonAR snps by intersecting with masked neutral intervals
    for chr in nonAR_snp_bitsets:
        nonAR_snp_bitsets[chr].iand(neutral_bitsets[chr])
    # Generates nonAR divs by intersecting with masked neutral intervals
    for chr in nonAR_div_bitsets:
        nonAR_div_bitsets[chr].iand(neutral_bitsets[chr])
    # Slide a window across each chromosome and emit one MK test per window.
    for chr in AR_div_bitsets:
        for window in range(0, chr_lens[chr] - window_size, step_size):
            # neutral_size = neutral_bitsets[chr].count_range(window, window_size)
            # if neutral_size < 9200: continue
            AR_snp = AR_snp_bitsets[chr].count_range(window, window_size)
            AR_div = AR_div_bitsets[chr].count_range(window, window_size)
            nonAR_snp = nonAR_snp_bitsets[chr].count_range(window, window_size)
            nonAR_div = nonAR_div_bitsets[chr].count_range(window, window_size)
            # Chi-square approximation is only trusted when every cell >= 6.
            if nonAR_snp >= 6 and nonAR_div >= 6 and AR_snp >= 6 and AR_div >= 6:
                MK_pval = MK_chi_pvalue(nonAR_snp, nonAR_div, AR_snp, AR_div)
            else:
                MK_pval = MK_fisher_pvalue(nonAR_snp, nonAR_div, AR_snp, AR_div)
            if options.outfile != None:
                out_file.write("%s\t%d\t%d\t%d\t%d\t%d\t%d\t%1.15f\n" % (chr, window, window+window_size, nonAR_snp, nonAR_div, AR_snp, AR_div, MK_pval))
            else:
                print "%s\t%d\t%d\t%d\t%d\t%d\t%d\t%1.15f" % (chr, window, window+window_size, nonAR_snp, nonAR_div, AR_snp, AR_div, MK_pval)
    if options.outfile != None:
        out_file.close()
def MK_fisher_pvalue(win_snp, win_div, AR_snp, AR_div):
    """Fisher's exact test p-value for the 2x2 MK contingency table (via rpy)."""
    # An all-zero table has no signal; short-circuit to p = 1.0 since the
    # test itself is undefined there.
    if win_snp == 0 and win_div == 0 and AR_snp == 0 and AR_div == 0:
        return 1.0
    fisher_result = r.fisher_test(r.matrix(r.c([win_snp, win_div, AR_snp, AR_div]), nr = 2))
    return fisher_result['p.value']
def MK_chi_pvalue(win_snp, win_div, AR_snp, AR_div):
    """Chi-square test p-value for the 2x2 MK contingency table (via rpy)."""
    chi_result = r.chisq_test(r.matrix(r.c([win_snp, win_div, AR_snp, AR_div]), nr = 2))
    return chi_result['p.value']
main() | [
"sabba_88@hotmail.com"
] | sabba_88@hotmail.com |
7a439418db24e003bfc0ebaf4de35bfea8aa354a | af4d559792c4255d5f26bc078cd176b70c0e643f | /hpsklearn/components/cluster/_kmeans.py | c4e90dbb37b0bd24bb91f61aee16d96e36ac250b | [
"BSD-3-Clause"
] | permissive | hyperopt/hyperopt-sklearn | ec7d5f97ba8fd5a2c283dfec2fa9e0170b61c6ce | 4b3f6fde3a1ded2e71e8373d52c1b51a0239ef91 | refs/heads/master | 2023-08-02T07:19:20.259964 | 2022-12-15T17:53:07 | 2022-12-15T17:53:07 | 8,293,893 | 1,480 | 292 | NOASSERTION | 2022-12-15T17:53:08 | 2013-02-19T16:09:53 | Python | UTF-8 | Python | false | false | 5,567 | py | from hpsklearn.components._base import validate
from hyperopt.pyll import scope, Apply
from hyperopt import hp
from sklearn import cluster
import numpy.typing as npt
import typing
@scope.define
def sklearn_KMeans(*args, **kwargs):
    # pyll-level factory: defers sklearn.cluster.KMeans construction until
    # hyperopt evaluates a concrete search point.
    return cluster.KMeans(*args, **kwargs)
@scope.define
def sklearn_MiniBatchKMeans(*args, **kwargs):
    # pyll-level factory: defers sklearn.cluster.MiniBatchKMeans construction
    # until hyperopt evaluates a concrete search point.
    return cluster.MiniBatchKMeans(*args, **kwargs)
def _kmeans_n_clusters(name: str):
    """
    Declaration search space 'n_clusters' parameter:
    integer drawn uniformly from [1, 20].
    """
    return scope.int(hp.uniform(name, 1, 20))
def _kmeans_init(name: str):
    """
    Declaration search space 'init' parameter:
    choice between 'k-means++' and 'random' initialization.
    """
    return hp.choice(name, ["k-means++", "random"])
def _kmeans_random_state(name: str):
    """
    Declaration search space 'random_state' parameter:
    integer drawn from [0, 5).
    """
    return hp.randint(name, 5)
def _kmeans_hp_space(
        name_func,
        n_clusters: typing.Union[int, Apply] = None,
        init: typing.Union[str, callable, npt.ArrayLike, Apply] = None,
        verbose: int = 0,
        random_state=None
):
    """Build the hyperparameter space shared by the k-means estimators.

    Any argument left as None is replaced by its default hyperopt search
    expression, labelled via `name_func`.  Returns a plain dict suitable
    for extension by the KMeans/MiniBatchKMeans constructors.
    """
    if n_clusters is None:
        n_clusters = _kmeans_n_clusters(name_func("n_clusters"))
    if init is None:
        init = _kmeans_init(name_func("init"))
    if random_state is None:
        random_state = _kmeans_random_state(name_func("random_state"))
    return dict(
        n_clusters=n_clusters,
        init=init,
        verbose=verbose,
        random_state=random_state,
    )
# Rejects string `algorithm` values outside sklearn's supported set; non-string
# values (e.g. hyperopt expressions) pass through unchecked.
@validate(params=["algorithm"],
          validation_test=lambda param: not isinstance(param, str) or param in ["auto", "full", "elkan"],
          msg="Invalid parameter '%s' with value '%s'. Value must be 'auto', 'full' or 'elkan'")
def k_means(name: str,
            n_init: typing.Union[int, Apply] = None,
            max_iter: typing.Union[int, Apply] = None,
            tol: typing.Union[float, Apply] = None,
            copy_x: bool = True,
            algorithm: typing.Union[str, Apply] = None,
            **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cluster.KMeans model.

    Args:
        name: name (prefix for hyperparameter labels) | str
        n_init: number of times to run k-means algorithm | int
        max_iter: maximum number of iterations | int
        tol: relative tolerance in regard to Frobenius norm | float
        copy_x: modify copy of data | bool
        algorithm: K-means algorithm to use | str

    Arguments left as None are given default hyperopt search expressions.
    See help(hpsklearn.components.cluster._kmeans._kmeans_hp_space)
    for info on additional available k means arguments.
    """
    def _name(msg):
        # Namespaced hyperparameter label, unique within the search space.
        return f"{name}.k_means_{msg}"
    hp_space = _kmeans_hp_space(_name, **kwargs)
    hp_space["n_init"] = scope.int(hp.uniform(_name("n_init"), 2, 25)) if n_init is None else n_init
    hp_space["max_iter"] = scope.int(hp.uniform(_name("max_iter"), 100, 500)) if max_iter is None else max_iter
    hp_space["tol"] = hp.uniform(_name("tol"), 1e-5, 1e-3) if tol is None else tol
    hp_space["copy_x"] = copy_x
    hp_space["algorithm"] = hp.choice(_name("algorithm"), ["auto", "full", "elkan"]) if algorithm is None else algorithm
    return scope.sklearn_KMeans(**hp_space)
def mini_batch_k_means(name: str,
                       max_iter: typing.Union[int, Apply] = None,
                       batch_size: typing.Union[int, Apply] = None,
                       compute_labels: bool = True,
                       tol: typing.Union[float, Apply] = None,
                       max_no_improvement: typing.Union[int, Apply] = None,
                       init_size: int = None,
                       n_init: typing.Union[int, Apply] = None,
                       reassignment_ratio: typing.Union[float, Apply] = None,
                       **kwargs):
    """
    Return a pyll graph with hyperparameters that will construct
    a sklearn.cluster.MiniBatchKMeans model.

    Args:
        name: name (prefix for hyperparameter labels) | str
        max_iter: maximum number of iterations | int
        batch_size: size of the mini batches | int
        compute_labels: compute label assignment and inertia | bool
        tol: relative tolerance with regards to Frobenius norm | float
        max_no_improvement: early stopping when no improvement found | int
        init_size: random samples for initialization | int
        n_init: number of times to run k-means algorithm | int
        reassignment_ratio: control the fraction for center reassignment | float

    Arguments left as None are given default hyperopt search expressions.
    See help(hpsklearn.components.cluster._kmeans._kmeans_hp_space)
    for info on additional available k means arguments.
    """
    def _name(msg):
        # Namespaced hyperparameter label, unique within the search space.
        return f"{name}.mini_batch_k_means_{msg}"
    hp_space = _kmeans_hp_space(_name, **kwargs)
    hp_space["max_iter"] = scope.int(hp.uniform(_name("max_iter"), 100, 300)) if max_iter is None else max_iter
    hp_space["batch_size"] = hp.choice(_name("batch_size"), [256, 512, 1024, 2048]) \
        if batch_size is None else batch_size
    hp_space["compute_labels"] = compute_labels
    hp_space["tol"] = hp.uniform(_name("tol"), 1e-7, 1e-5) if tol is None else tol
    hp_space["max_no_improvement"] = scope.int(hp.uniform(_name("max_no_improvement"), 5, 25)) \
        if max_no_improvement is None else max_no_improvement
    hp_space["init_size"] = init_size
    hp_space["n_init"] = hp.choice(_name("n_init"), [1, 2, 3, 4]) if n_init is None else n_init
    hp_space["reassignment_ratio"] = hp.uniform(_name("reassignment_ratio"), 0.001, 0.1) \
        if reassignment_ratio is None else reassignment_ratio
    return scope.sklearn_MiniBatchKMeans(**hp_space)
| [
"38689620+mandjevant@users.noreply.github.com"
] | 38689620+mandjevant@users.noreply.github.com |
008cfa98bc23ee715832fb1c34d7ab9ee9e9aeb9 | f07a21e66c0dde0691142e31378d10527e44e54c | /re-start/018. 뉴스 클러스터링.py | 9a650b34990e0d0aedff2271460484d3213af037 | [] | no_license | cheol-95/Algorithm | 0a3454e5d3fff21ec50ec20dc64341b13cb972dc | 6a130bb0817395550f00c192074d01f5c6443628 | refs/heads/master | 2023-04-28T09:47:04.389059 | 2023-04-16T07:54:36 | 2023-04-16T07:54:36 | 250,749,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | def solution(str1, str2):
str1 = [(ch_1 + ch_2).upper() for ch_1, ch_2 in zip(str1[:-1], str1[1:]) if ch_1.isalpha() and ch_2.isalpha()]
str2 = [(ch_1 + ch_2).upper() for ch_1, ch_2 in zip(str2[:-1], str2[1:]) if ch_1.isalpha() and ch_2.isalpha()]
if not str1 and not str2:
return 65536
union = 0
for ch_1 in str1:
if ch_1 in str2:
union += 1
str2.remove(ch_1)
empty_set = len(str1) + len(str2)
return int((union / empty_set) * 65536)
str1, str2 = "FRANCE", "french"
# str1, str2 = "aa1+aa2", "AAAA12"
# str1, str2 = "handshake", "shake hands"
# str1, str2 = "E=M*C^2", "e=m*c^2"
print(solution(str1, str2)) | [
"rkdcjf0122@gmail.com"
] | rkdcjf0122@gmail.com |
9a942ee1f83cf77dc5476f44d4d5c59dc7fbc339 | 97aa1181a8305fab0cfc635954c92880460ba189 | /torch/testing/_internal/common_cuda.py | 8db1456cc4c46890f5b40e7723a928f54c30e075 | [
"BSD-2-Clause"
] | permissive | zhujiang73/pytorch_mingw | 64973a4ef29cc10b96e5d3f8d294ad2a721ccacb | b0134a0acc937f875b7c4b5f3cef6529711ad336 | refs/heads/master | 2022-11-05T12:10:59.045925 | 2020-08-22T12:10:32 | 2020-08-22T12:10:32 | 123,688,924 | 8 | 4 | NOASSERTION | 2022-10-17T12:30:52 | 2018-03-03T12:15:16 | C++ | UTF-8 | Python | false | false | 1,302 | py | r"""This file is allowed to initialize CUDA context when imported."""
import torch
import torch.cuda
from torch.testing._internal.common_utils import TEST_NUMBA
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
# Short-circuits to False when CUDA is unavailable, else a torch.device.
CUDA_DEVICE = TEST_CUDA and torch.device("cuda:0")
# note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN
TEST_CUDNN = TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE))
TEST_CUDNN_VERSION = torch.backends.cudnn.version() if TEST_CUDNN else 0
# numba is imported lazily so the flag is False when numba itself is absent.
if TEST_NUMBA:
    import numba.cuda
    TEST_NUMBA_CUDA = numba.cuda.is_available()
else:
    TEST_NUMBA_CUDA = False
# Used below in `initialize_cuda_context_rng` to ensure that CUDA context and
# RNG have been initialized.
__cuda_ctx_rng_initialized = False
# after this call, CUDA context and RNG must have been initialized on each GPU
def initialize_cuda_context_rng():
    """Initialize the CUDA context and RNG on every visible GPU (idempotent)."""
    global __cuda_ctx_rng_initialized
    assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng'
    if not __cuda_ctx_rng_initialized:
        # initialize cuda context and rng for memory tests; a single randn on
        # each device is enough to force both.
        for i in range(torch.cuda.device_count()):
            torch.randn(1, device="cuda:{}".format(i))
        __cuda_ctx_rng_initialized = True
"zhujiangmail@hotmail.com"
] | zhujiangmail@hotmail.com |
d2315f02b2072e2d9a5b1c0dab10bee84c056edc | 07bae7671cac165fb91554343396ee1343c6363d | /xiecheng/coroutineTest1.py | 8c73a90b6602c02e9c3cbd7d24749f72b25d392c | [] | no_license | quyixiao/python_lesson | 7869dfd3aec8f5b6500ae955ae5c50a956f7b4c3 | 81684d06e6f054049fa79b0e63ab528bdc46581f | refs/heads/master | 2021-06-28T08:01:02.937679 | 2021-03-11T10:29:57 | 2021-03-11T10:29:57 | 221,687,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # 协程
# An advanced use of generators.
# Coroutines are one way to implement user-space scheduling of functions.
# Python 3's asyncio is built on coroutines and is part of the standard library.
# Python 3.5 added the async/await keywords for native coroutine support.
# Sketch of a coroutine scheduler:
# given two generators A and B,
# after next(A), A runs
def inc():
    """Generator yielding the integers 0 through 99 in order."""
    value = 0
    while value < 100:
        yield value
        value += 1
foo = inc()
print(next(foo))
print(next(foo))
print(next(foo)) | [
"2621048238@qq.com"
] | 2621048238@qq.com |
e7f98e7798e41c2dcf024bd988520a1d7bab7552 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /26P2iwW5WfwPGJyWE_14.py | 70b4f9da3c875f0fff26ec25fd0a3a2fb475e52d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | """
Suppose a student can earn 100% on an exam by getting the answers all correct
or all incorrect. Given a **potentially incomplete** answer key and the
student's answers, write a function that determines whether or not a student
can still score 100%. Incomplete questions are marked with an underscore,
`"_"`.
["A", "_", "C", "_", "B"] # answer key
["A", "D", "C", "E", "B"] # student's solution
➞ True
# Possible for student to get all questions correct.
["B", "_", "B"] # answer key
["B", "D", "C"] # student's solution
➞ False
# First question is correct but third is wrong, so not possible to score 100%.
["T", "_", "F", "F", "F"] # answer key
["F", "F", "T", "T", "T"] # student's solution
➞ True
# Possible for student to get all questions incorrect.
### Examples
possibly_perfect(["B", "A", "_", "_"], ["B", "A", "C", "C"]) ➞ True
possibly_perfect(["A", "B", "A", "_"], ["B", "A", "C", "C"]) ➞ True
possibly_perfect(["A", "B", "C", "_"], ["B", "A", "C", "C"]) ➞ False
possibly_perfect(["B", "_"], ["C", "A"]) ➞ True
possibly_perfect(["B", "A"], ["C", "A"]) ➞ False
possibly_perfect(["B"], ["B"]) ➞ True
### Notes
Test has at least one question.
"""
def possibly_perfect(key, answers):
newKey = []
newAnswers = []
for i, v in enumerate(key):
if v != '_':
newKey.append(key[i])
newAnswers.append(answers[i])
diff = [v for i, v in enumerate(newKey) if newKey[i] != newAnswers[i]]
same = [v for i, v in enumerate(newKey) if newKey[i] == newAnswers[i]]
return len(diff) == 0 or len(same) == 0
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
9c86f4361ab044e82d3fa23ded180195a4735774 | e74463d223acfe6b849177177cb409060e7a44d1 | /Data Structures and Algorithms/02 Data Structures/Week 3 - Priority Queues and Disjoint Sets/assignment/merging_tables.py | 72b14ae5556275a9e47d3d7273377402fe30690f | [] | no_license | AlexEngelhardt-old/courses | 24f4acf6de22f6707568024c5ee4a2fde412e461 | 739be99265b0aca1c58abe6f107b4c49de055b9d | refs/heads/master | 2023-05-05T22:25:50.327739 | 2020-12-09T14:57:46 | 2020-12-09T14:57:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,801 | py | class Database:
    def __init__(self, row_counts):
        """Set up union-find state for `len(row_counts)` singleton tables."""
        # row_counts[i] is only kept accurate for component roots; after a
        # merge, non-root entries become stale (see merge()).
        self.row_counts = row_counts
        self.max_row_count = max(row_counts)
        n_tables = len(row_counts)
        # Union-by-rank bookkeeping: every table starts as its own root.
        self.ranks = [1] * n_tables
        self.parents = list(range(n_tables))
def merge(self, src, dst):
src_parent = self.get_parent(src)
dst_parent = self.get_parent(dst)
if src_parent == dst_parent:
return False
# merge two components
# use union by rank heuristic
if self.ranks[src_parent] < self.ranks[dst_parent]:
self.parents[src_parent] = dst_parent
self.row_counts[dst_parent] += self.row_counts[src_parent] # we ignore the row_counts of all non-root nodes; they will be wrong and useless
self.max_row_count = max(self.max_row_count, self.row_counts[dst_parent])
else:
self.parents[dst_parent] = src_parent
self.row_counts[src_parent] += self.row_counts[dst_parent]
self.max_row_count = max(self.max_row_count, self.row_counts[src_parent])
if self.ranks[src_parent] == self.ranks[dst_parent]:
self.ranks[src_parent] += 1
return True
def get_parent(self, table):
# find parent and compress path
# TODO I haven't done the path compression
while table != self.parents[table]:
table = self.parents[table]
return self.parents[table]
def main():
    """Read merge queries from stdin; print the largest table after each.

    Input format: first line "n_tables n_queries", second line the row
    counts, then one "dst src" pair (1-based) per query.
    """
    n_tables, n_queries = map(int, input().split())
    row_counts = list(map(int, input().split()))
    assert len(row_counts) == n_tables
    db = Database(row_counts)
    for _ in range(n_queries):
        dst, src = map(int, input().split())
        db.merge(dst - 1, src - 1)  # queries are 1-based; Database ids are 0-based
        print(db.max_row_count)


if __name__ == "__main__":
    main()
| [
"alexander.w.engelhardt@gmail.com"
] | alexander.w.engelhardt@gmail.com |
df195bc9e2840dd23b24d0d4163d02eb205b80ca | 9047328d03d38c0833193987a9409600200d83bc | /myutils/counter.py | 4fd1cb06eada5e7eec2980c5adccbfc77ea730ca | [] | no_license | teddyxiong53/Python | 06d444f89d14ae5071248d93ea973fd1d9ad2795 | 629775569cb94968bb8a4e34e31871fcc1bd2969 | refs/heads/master | 2020-04-05T23:33:13.155112 | 2019-10-24T09:37:04 | 2019-10-24T09:37:04 | 68,708,738 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | class Counter(storage):
def add(self, n):
    """Record one occurrence of n, starting its tally at zero if unseen."""
    # self is dict-like (storage subclass), so get/assign is equivalent
    # to setdefault-then-increment.
    current = self.get(n, 0)
    self[n] = current + 1
def most(self):
    """Return the list of keys whose tally equals the maximum tally.

    Raises ValueError on an empty counter (max() of an empty sequence).
    NOTE: uses Python 2 iterator methods, consistent with the rest of
    this class.
    """
    m = max(self.itervalues())
    # Fix: iteritems must be *called*.  The original iterated the bound
    # method object itself, which raises TypeError at runtime.
    return [k for k, v in self.iteritems() if v == m]
| [
"1073167306@qq.com"
] | 1073167306@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.