Dataset columns (name: dtype, observed range):
- blob_id: string, length 40 to 40
- directory_id: string, length 40 to 40
- path: string, length 3 to 616
- content_id: string, length 40 to 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40 to 40
- revision_id: string, length 40 to 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: list, length 1 to 1
- author_id: string, length 1 to 132

The rows below give each record's fields in this order, one value per line, with `|` separating consecutive fields.
9af39db942c3e1dde49345526d12a0c37972f44a
|
739e41d4f24f79c772d266cded0de9b759c6e953
|
/venv/lib/python3.6/site-packages/datasets/__init__.py
|
eb40f7f2cfc457e3e42b84454b398c6c73b9dd70
|
[
"MIT"
] |
permissive
|
MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020
|
24b7bbdecf459292f8b58be286feab3b9aa341ba
|
82586c632268c103de269bcbffa5f7849b174a29
|
refs/heads/main
| 2023-05-18T15:41:13.495286
| 2021-06-11T18:21:35
| 2021-06-11T18:21:35
| 304,268,819
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,246
|
py
|
# flake8: noqa
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "1.0.2"
import pyarrow
from pyarrow import total_allocated_bytes
from . import datasets
from .arrow_dataset import Dataset, concatenate_datasets
from .arrow_reader import ArrowReader, ReadInstruction
from .arrow_writer import ArrowWriter
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .dataset_dict import DatasetDict
from .features import (
Array2D,
Array3D,
Array4D,
Array5D,
ClassLabel,
Features,
Sequence,
Translation,
TranslationVariableLanguages,
Value,
)
from .info import DatasetInfo, MetricInfo
from .inspect import inspect_dataset, inspect_metric, list_datasets, list_metrics
from .load import import_main_class, load_dataset, load_from_disk, load_metric, prepare_module
from .metric import Metric
from .splits import NamedSplit, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent
from .utils import *
from .utils.tqdm_utils import disable_progress_bar
if int(pyarrow.__version__.split(".")[1]) < 16 and int(pyarrow.__version__.split(".")[0]) == 0:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=0.16.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
SCRIPTS_VERSION = __version__
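# Hedged illustration (not part of the original file): the pyarrow guard above
# can also be written as a tuple comparison, which reads more directly and
# treats hypothetical future 0.x releases the same way.
# major, minor = (int(part) for part in pyarrow.__version__.split(".")[:2])
# if (major, minor) < (0, 16):
#     raise ImportWarning("To use `datasets`, `pyarrow>=0.16.0` is required.")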
|
[
"adiaz@bcamath.org"
] |
adiaz@bcamath.org
|
9685449d5e5645b6666498b20dc26c867e5091a9
|
946748c5c4bc70df42e852cc285a0fe5317bec45
|
/ax/core/experiment.py
|
1033e9b88d204cbedcbf68fb785d40d1e1cee43d
|
[
"MIT"
] |
permissive
|
isabella232/Ax
|
34cf41fde3f7ed7f478149b83aa8b72bfa8f5116
|
051054c6f21617937fc263af174773fa55c37818
|
refs/heads/master
| 2023-04-11T11:49:22.097326
| 2021-04-17T17:22:53
| 2021-04-17T17:25:14
| 359,406,904
| 0
| 0
|
MIT
| 2021-04-19T09:51:19
| 2021-04-19T09:46:11
| null |
UTF-8
|
Python
| false
| false
| 35,976
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import OrderedDict, defaultdict
from datetime import datetime
from enum import Enum
from functools import reduce
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type
import pandas as pd
from ax.core.abstract_data import AbstractDataFrameData
from ax.core.arm import Arm
from ax.core.base_trial import BaseTrial, TrialStatus
from ax.core.batch_trial import BatchTrial
from ax.core.data import Data
from ax.core.generator_run import GeneratorRun
from ax.core.map_data import MapData
from ax.core.metric import Metric
from ax.core.optimization_config import OptimizationConfig
from ax.core.parameter import Parameter
from ax.core.runner import Runner
from ax.core.search_space import SearchSpace
from ax.core.trial import Trial
from ax.exceptions.core import UnsupportedError
from ax.utils.common.base import Base
from ax.utils.common.constants import Keys
from ax.utils.common.docutils import copy_doc
from ax.utils.common.logger import get_logger
from ax.utils.common.timeutils import current_timestamp_in_millis
logger: logging.Logger = get_logger(__name__)
class DataType(Enum):
DATA = 1
MAP_DATA = 2
DATA_TYPE_LOOKUP: Dict[DataType, Type] = {
DataType.DATA: Data,
DataType.MAP_DATA: MapData,
}
# pyre-fixme[13]: Attribute `_search_space` is never initialized.
class Experiment(Base):
"""Base class for defining an experiment."""
def __init__(
self,
search_space: SearchSpace,
name: Optional[str] = None,
optimization_config: Optional[OptimizationConfig] = None,
tracking_metrics: Optional[List[Metric]] = None,
runner: Optional[Runner] = None,
status_quo: Optional[Arm] = None,
description: Optional[str] = None,
is_test: bool = False,
experiment_type: Optional[str] = None,
properties: Optional[Dict[str, Any]] = None,
default_data_type: Optional[DataType] = None,
) -> None:
"""Inits Experiment.
Args:
search_space: Search space of the experiment.
name: Name of the experiment.
optimization_config: Optimization config of the experiment.
tracking_metrics: Additional tracking metrics not used for optimization.
runner: Default runner used for trials on this experiment.
status_quo: Arm representing existing "control" arm.
description: Description of the experiment.
is_test: Convenience metadata tracker for the user to mark test experiments.
experiment_type: The class of experiments this one belongs to.
properties: Dictionary of this experiment's properties.
default_data_type: Enum representing the data type this experiment uses.
"""
# appease pyre
self._search_space: SearchSpace
self._status_quo: Optional[Arm] = None
self._name = name
self.description = description
self.runner = runner
self.is_test = is_test
self._data_by_trial: Dict[int, OrderedDict[int, AbstractDataFrameData]] = {}
self._experiment_type: Optional[str] = experiment_type
self._optimization_config = None
self._tracking_metrics: Dict[str, Metric] = {}
self._time_created: datetime = datetime.now()
self._trials: Dict[int, BaseTrial] = {}
self._properties: Dict[str, Any] = properties or {}
self._default_data_type = default_data_type or DataType.DATA
# Used to keep track of whether any trials on the experiment
# specify a TTL. Since trials need to be checked for their TTL's
# expiration often, having this attribute helps avoid unnecessary
# TTL checks for experiments that do not use TTL.
self._trials_have_ttl = False
# Make sure all statuses appear in this dict, to avoid key errors.
self._trial_indices_by_status: Dict[TrialStatus, Set[int]] = {
status: set() for status in TrialStatus
}
self._arms_by_signature: Dict[str, Arm] = {}
self._arms_by_name: Dict[str, Arm] = {}
self.add_tracking_metrics(tracking_metrics or [])
# call setters defined below
self.search_space = search_space
self.status_quo = status_quo
if optimization_config is not None:
self.optimization_config = optimization_config
@property
def has_name(self) -> bool:
"""Return true if experiment's name is not None."""
return self._name is not None
@property
def name(self) -> str:
"""Get experiment name. Throws if name is None."""
if self._name is None:
raise ValueError("Experiment's name is None.")
# pyre-fixme[7]: Expected `str` but got `Optional[str]`.
return self._name
@name.setter
def name(self, name: str) -> None:
"""Set experiment name."""
self._name = name
@property
def is_simple_experiment(self):
"""Whether this experiment is a regular Experiment or the subclassing
`SimpleExperiment`."""
return False
@property
def time_created(self) -> datetime:
"""Creation time of the experiment."""
return self._time_created
@property
def experiment_type(self) -> Optional[str]:
"""The type of the experiment."""
return self._experiment_type
@experiment_type.setter
def experiment_type(self, experiment_type: Optional[str]) -> None:
"""Set the type of the experiment."""
self._experiment_type = experiment_type
@property
def search_space(self) -> SearchSpace:
"""The search space for this experiment.
When setting a new search space, all parameter names and types
must be preserved. However, if no trials have been created, all
modifications are allowed.
"""
# TODO: maybe return a copy here to guard against implicit changes
return self._search_space
@search_space.setter
def search_space(self, search_space: SearchSpace) -> None:
# Allow all modifications when no trials present.
if not hasattr(self, "_search_space") or len(self.trials) < 1:
self._search_space = search_space
return
# At least 1 trial is present.
if self.immutable_search_space_and_opt_config:
raise UnsupportedError(
"Modifications of search space are disabled by the "
f"`{Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value}` "
"property that is set to `True` on this experiment."
)
if len(search_space.parameters) < len(self._search_space.parameters):
raise ValueError(
"New search_space must contain all parameters in the existing."
)
for param_name, parameter in search_space.parameters.items():
if param_name not in self._search_space.parameters:
raise ValueError(
f"Cannot add new parameter `{param_name}` because "
"it is not defined in the existing search space."
)
elif (
parameter.parameter_type
!= self._search_space.parameters[param_name].parameter_type
):
raise ValueError(
f"Expected parameter `{param_name}` to be of type "
f"{self._search_space.parameters[param_name].parameter_type}, "
f"got {parameter.parameter_type}."
)
self._search_space = search_space
@property
def status_quo(self) -> Optional[Arm]:
"""The existing arm that new arms will be compared against."""
return self._status_quo
@status_quo.setter
def status_quo(self, status_quo: Optional[Arm]) -> None:
if status_quo is not None:
self.search_space.check_types(status_quo.parameters, raise_error=True)
# Compute a unique name if "status_quo" is taken
name = "status_quo"
sq_idx = 0
arms_by_name = self.arms_by_name
while name in arms_by_name:
name = f"status_quo_e{sq_idx}"
sq_idx += 1
self._name_and_store_arm_if_not_exists(arm=status_quo, proposed_name=name)
# If old status_quo not present in any trials,
# remove from _arms_by_signature
if self._status_quo is not None:
persist_old_sq = False
for trial in self._trials.values():
# pyre-fixme[16]: `Optional` has no attribute `name`.
if self._status_quo.name in trial.arms_by_name:
persist_old_sq = True
break
if not persist_old_sq:
# pyre-fixme[16]: `Optional` has no attribute `signature`.
self._arms_by_signature.pop(self._status_quo.signature)
self._arms_by_name.pop(self._status_quo.name)
self._status_quo = status_quo
@property
def parameters(self) -> Dict[str, Parameter]:
"""The parameters in the experiment's search space."""
return self.search_space.parameters
@property
def arms_by_name(self) -> Dict[str, Arm]:
"""The arms belonging to this experiment, by their name."""
return self._arms_by_name
@property
def arms_by_signature(self) -> Dict[str, Arm]:
"""The arms belonging to this experiment, by their signature."""
return self._arms_by_signature
@property
def sum_trial_sizes(self) -> int:
"""Sum of numbers of arms attached to each trial in this experiment."""
return reduce(lambda a, b: a + len(b.arms_by_name), self._trials.values(), 0)
@property
def num_abandoned_arms(self) -> int:
"""How many arms attached to this experiment are abandoned."""
abandoned = set()
for trial in self.trials.values():
for x in trial.abandoned_arms:
abandoned.add(x)
return len(abandoned)
@property
def optimization_config(self) -> Optional[OptimizationConfig]:
"""The experiment's optimization config."""
return self._optimization_config
@optimization_config.setter
def optimization_config(self, optimization_config: OptimizationConfig) -> None:
if (
getattr(self, "_optimization_config", None) is not None
and self.immutable_search_space_and_opt_config
):
raise UnsupportedError(
"Modifications of optimization config are disabled by the "
f"`{Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF.value}` "
"property that is set to `True` on this experiment."
)
for metric_name in optimization_config.metrics.keys():
if metric_name in self._tracking_metrics:
self.remove_tracking_metric(metric_name)
self._optimization_config = optimization_config
@property
def data_by_trial(self) -> Dict[int, OrderedDict]:
"""Data stored on the experiment, indexed by trial index and storage time.
First key is trial index and second key is storage time in milliseconds.
For a given trial, data is ordered by storage time, so first added data
will appear first in the list.
"""
return self._data_by_trial
@property
def immutable_search_space_and_opt_config(self) -> bool:
"""Boolean representing whether search space and metrics on this experiment
are mutable (by default they are).
NOTE: For experiments with immutable search spaces and metrics, generator
runs will not store copies of search space and metrics, which improves
storage layer performance. Not keeping copies of those on generator runs
also disables keeping track of changes to search space and metrics,
thereby necessitating that those attributes be immutable on experiment.
"""
return self._properties.get(Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF, False)
def add_tracking_metric(self, metric: Metric) -> "Experiment":
"""Add a new metric to the experiment.
Args:
metric: Metric to be added.
"""
if metric.name in self._tracking_metrics:
raise ValueError(
f"Metric `{metric.name}` already defined on experiment. "
"Use `update_tracking_metric` to update an existing metric definition."
)
optimization_config = self.optimization_config
if optimization_config and metric.name in optimization_config.metrics:
raise ValueError(
f"Metric `{metric.name}` already present in experiment's "
"OptimizationConfig. Set a new OptimizationConfig without this metric "
"before adding it to tracking metrics."
)
self._tracking_metrics[metric.name] = metric
return self
def add_tracking_metrics(self, metrics: List[Metric]) -> "Experiment":
"""Add a list of new metrics to the experiment.
If any of the metrics are already defined on the experiment,
we raise an error and don't add any of them to the experiment
Args:
metrics: Metrics to be added.
"""
# Before setting any metrics, we validate none are already on
# the experiment
for metric in metrics:
self.add_tracking_metric(metric)
return self
def update_tracking_metric(self, metric: Metric) -> "Experiment":
"""Redefine a metric that already exists on the experiment.
Args:
metric: New metric definition.
"""
if metric.name not in self._tracking_metrics:
raise ValueError(f"Metric `{metric.name}` doesn't exist on experiment.")
self._tracking_metrics[metric.name] = metric
return self
def remove_tracking_metric(self, metric_name: str) -> "Experiment":
"""Remove a metric that already exists on the experiment.
Args:
metric_name: Unique name of metric to remove.
"""
if metric_name not in self._tracking_metrics:
raise ValueError(f"Metric `{metric_name}` doesn't exist on experiment.")
del self._tracking_metrics[metric_name]
return self
@property
def metrics(self) -> Dict[str, Metric]:
"""The metrics attached to the experiment."""
optimization_config_metrics: Dict[str, Metric] = {}
if self.optimization_config is not None:
# pyre-fixme[16]: `Optional` has no attribute `metrics`.
optimization_config_metrics = self.optimization_config.metrics
return {**self._tracking_metrics, **optimization_config_metrics}
def _metrics_by_class(
self, metrics: Optional[List[Metric]] = None
) -> Dict[Type[Metric], List[Metric]]:
metrics_by_class: Dict[Type[Metric], List[Metric]] = defaultdict(list)
for metric in metrics or list(self.metrics.values()):
# By default, all metrics are grouped by their class for fetch;
# however, for some metrics, `fetch_trial_data_multi` of a
# superclass is used for fetch the subclassing metrics' data. In
# those cases, "fetch_multi_group_by_metric" property on metric
# will be set to a class other than its own (likely a superclass).
metrics_by_class[metric.fetch_multi_group_by_metric].append(metric)
return metrics_by_class
def fetch_data(
self, metrics: Optional[List[Metric]] = None, **kwargs: Any
) -> AbstractDataFrameData:
"""Fetches data for all trials on this experiment and for either the
specified metrics or all metrics currently on the experiment, if `metrics`
argument is not specified.
NOTE: For metrics that are not available while trial is running, the data
may be retrieved from cache on the experiment. Data is cached on the experiment
via calls to `experiment.attach_data` and whether a given metric class is
available while trial is running is determined by the boolean returned from its
`is_available_while_running` class method.
Args:
metrics: If provided, fetch data for these metrics instead of the ones
defined on the experiment.
kwargs: keyword args to pass to underlying metrics' fetch data functions.
Returns:
Data for the experiment.
"""
return self._lookup_or_fetch_trials_data(
trials=list(self.trials.values()), metrics=metrics, **kwargs
)
def fetch_trials_data(
self,
trial_indices: Iterable[int],
metrics: Optional[List[Metric]] = None,
**kwargs: Any,
) -> AbstractDataFrameData:
"""Fetches data for specific trials on the experiment.
NOTE: For metrics that are not available while trial is running, the data
may be retrieved from cache on the experiment. Data is cached on the experiment
via calls to `experiment.attach_data` and whether a given metric class is
available while trial is running is determined by the boolean returned from its
`is_available_while_running` class method.
Args:
trial_indices: Indices of trials, for which to fetch data.
metrics: If provided, fetch data for these metrics instead of the ones
defined on the experiment.
kwargs: Keyword args to pass to underlying metrics' fetch data functions.
Returns:
Data for the specific trials on the experiment.
"""
return self._lookup_or_fetch_trials_data(
trials=self.get_trials_by_indices(trial_indices=trial_indices),
metrics=metrics,
**kwargs,
)
def _lookup_or_fetch_trials_data(
self,
trials: List[BaseTrial],
metrics: Optional[Iterable[Metric]] = None,
**kwargs: Any,
) -> AbstractDataFrameData:
if not self.metrics and not metrics:
raise ValueError(
"No metrics to fetch data for, as no metrics are defined for "
"this experiment, and none were passed in to `fetch_data`."
)
if not any(t.status.expecting_data for t in trials):
logger.info("No trials are in a state expecting data. Returning empty data")
return self.default_data_constructor()
metrics_to_fetch = list(metrics or self.metrics.values())
metrics_by_class = self._metrics_by_class(metrics=metrics_to_fetch)
data_list = []
for metric_cls in metrics_by_class:
data_list.append(
metric_cls.lookup_or_fetch_experiment_data_multi(
experiment=self,
metrics=metrics_by_class[metric_cls],
trials=trials,
**kwargs,
)
)
return self.default_data_constructor.from_multiple_data(data=data_list)
@copy_doc(BaseTrial.fetch_data)
def _fetch_trial_data(
self, trial_index: int, metrics: Optional[List[Metric]] = None, **kwargs: Any
) -> AbstractDataFrameData:
trial = self.trials[trial_index]
return self._lookup_or_fetch_trials_data(
trials=[trial], metrics=metrics, **kwargs
)
def attach_data(
self, data: AbstractDataFrameData, combine_with_last_data: bool = False
) -> int:
"""Attach data to experiment. Stores data in `experiment._data_by_trial`,
to be looked up via `experiment.lookup_data_for_trial`.
Args:
data: Data object to store.
combine_with_last_data: By default, when attaching data, it's identified
by its timestamp, and `experiment.lookup_data_for_trial` returns
data by most recent timestamp. In some cases, however, the goal
is to combine all data attached for a trial into a single Data
object. To achieve that goal, every call to `attach_data` after
the initial data is attached to trials, should be set to `True`.
Then, the newly attached data will be appended to existing data,
rather than stored as a separate object, and `lookup_data_for_trial`
will return the combined data object, rather than just the most
recently added data. This will validate that the newly added data
does not contain observations for the metrics that already have
observations in the most recent data stored.
Returns:
Timestamp of storage in millis.
"""
data_type = type(data)
data_init_args = data.serialize_init_args(data)
if data.df.empty:
raise ValueError("Data to attach is empty.")
metrics_not_on_exp = set(data.df["metric_name"].values) - set(
self.metrics.keys()
)
if metrics_not_on_exp:
logger.info(
f"Attached data has some metrics ({metrics_not_on_exp}) that are "
"not among the metrics on this experiment. Note that attaching data "
"will not automatically add those metrics to the experiment. "
"For these metrics to be automatically fetched by `experiment."
"fetch_data`, add them via `experiment.add_tracking_metric` or update "
"the experiment's optimization config."
)
cur_time_millis = current_timestamp_in_millis()
for trial_index, trial_df in data.df.groupby(data.df["trial_index"]):
current_trial_data = (
self._data_by_trial[trial_index]
if trial_index in self._data_by_trial
else OrderedDict()
)
if combine_with_last_data and len(current_trial_data) > 0:
last_ts, last_data = list(current_trial_data.items())[-1]
merged = pd.merge(
last_data.df,
trial_df,
on=["trial_index", "metric_name", "arm_name"],
how="inner",
)
if not merged.empty:
raise ValueError(
f"Last data for trial {trial_index} already contained an "
f"observation for metric {merged.head()['metric_name']}."
)
last_data_type = type(last_data)
# pyre-ignore [6]: 2nd Param is `AbstractData`,
# but we know class is concrete.
current_trial_data[cur_time_millis] = last_data_type.from_multiple_data(
[
last_data,
# pyre-ignore [45]: Cannot instantiate abstract class.
# But we know the class is concrete.
last_data_type(trial_df, **data_init_args),
]
)
else:
# pyre-ignore [45]: Cannot instantiate `AbstractDataFrameData`.
current_trial_data[cur_time_millis] = data_type(
trial_df, **data_init_args
)
self._data_by_trial[trial_index] = current_trial_data
return cur_time_millis
def lookup_data_for_ts(self, timestamp: int) -> AbstractDataFrameData:
"""Collect data for all trials stored at this timestamp.
Useful when many trials' data was fetched and stored simultaneously
and user wants to retrieve same collection of data later.
Can also be used to lookup specific data for a single trial
when storage time is known.
Args:
timestamp: Timestamp in millis at which data was stored.
Returns:
Data object with all data stored at the timestamp.
"""
trial_datas = []
for _trial_index, ts_to_data in self._data_by_trial.items():
if timestamp in ts_to_data:
trial_datas.append(ts_to_data[timestamp])
return self.default_data_constructor.from_multiple_data(trial_datas)
def lookup_data_for_trial(
self, trial_index: int
) -> Tuple[AbstractDataFrameData, int]:
"""Lookup stored data for a specific trial.
Returns latest data object, and its storage timestamp, present for this trial.
Returns empty data and -1 if no data present.
Args:
trial_index: The index of the trial to lookup data for.
Returns:
The requested data object, and its storage timestamp in milliseconds.
"""
try:
trial_data_dict = self._data_by_trial[trial_index]
except KeyError:
return (self.default_data_constructor(), -1)
if len(trial_data_dict) == 0:
return (self.default_data_constructor(), -1)
storage_time = max(trial_data_dict.keys())
trial_data = trial_data_dict[storage_time]
return trial_data, storage_time
@property
def num_trials(self) -> int:
"""How many trials are associated with this experiment."""
return len(self._trials)
@property
def trials(self) -> Dict[int, BaseTrial]:
"""The trials associated with the experiment.
NOTE: If some trials on this experiment specify their TTL, `RUNNING` trials
will be checked for whether their TTL elapsed during this call. Found past-
TTL trials will be marked as `FAILED`.
"""
self._check_TTL_on_running_trials()
return self._trials
@property
def trials_by_status(self) -> Dict[TrialStatus, List[BaseTrial]]:
"""Trials associated with the experiment, grouped by trial status."""
# Make sure all statuses appear in this dict, to avoid key errors.
return {
status: self.get_trials_by_indices(trial_indices=idcs)
for status, idcs in self.trial_indices_by_status.items()
}
@property
def trials_expecting_data(self) -> List[BaseTrial]:
"""List[BaseTrial]: the list of all trials for which data has arrived
or is expected to arrive.
"""
return [trial for trial in self.trials.values() if trial.status.expecting_data]
@property
def trial_indices_by_status(self) -> Dict[TrialStatus, Set[int]]:
"""Indices of trials associated with the experiment, grouped by trial
status.
"""
self._check_TTL_on_running_trials() # Marks past-TTL trials as failed.
return self._trial_indices_by_status
@property
def default_data_type(self) -> DataType:
return self._default_data_type
@property
def default_data_constructor(self) -> Type:
return DATA_TYPE_LOOKUP[self.default_data_type]
def new_trial(
self,
generator_run: Optional[GeneratorRun] = None,
trial_type: Optional[str] = None,
ttl_seconds: Optional[int] = None,
) -> Trial:
"""Create a new trial associated with this experiment.
Args:
generator_run: GeneratorRun, associated with this trial.
Trial has only one arm attached to it and this generator_run
must therefore contain one arm. This arm can also be set later
through `add_arm` or `add_generator_run`, but a trial's
associated generator run is immutable once set.
trial_type: Type of this trial, if used in MultiTypeExperiment.
ttl_seconds: If specified, trials will be considered failed after
this many seconds since the time the trial was run, unless the
trial is completed before then. Meant to be used to detect
'dead' trials, for which the evaluation process might have
crashed etc., and which should be considered failed after
their 'time to live' has passed.
"""
if ttl_seconds is not None:
self._trials_have_ttl = True
return Trial(
experiment=self,
trial_type=trial_type,
generator_run=generator_run,
ttl_seconds=ttl_seconds,
)
def new_batch_trial(
self,
generator_run: Optional[GeneratorRun] = None,
trial_type: Optional[str] = None,
optimize_for_power: Optional[bool] = False,
ttl_seconds: Optional[int] = None,
) -> BatchTrial:
"""Create a new batch trial associated with this experiment.
Args:
generator_run: GeneratorRun, associated with this trial. This can
also be set later through `add_arm` or `add_generator_run`, but a
trial's associated generator run is immutable once set.
trial_type: Type of this trial, if used in MultiTypeExperiment.
optimize_for_power: Whether to optimize the weights of arms in this
trial such that the experiment's power to detect effects of
certain size is as high as possible. Refer to documentation of
`BatchTrial.set_status_quo_and_optimize_power` for more detail.
ttl_seconds: If specified, trials will be considered failed after
this many seconds since the time the trial was run, unless the
trial is completed before then. Meant to be used to detect
'dead' trials, for which the evaluation process might have
crashed etc., and which should be considered failed after
their 'time to live' has passed.
"""
if ttl_seconds is not None:
self._trials_have_ttl = True
return BatchTrial(
experiment=self,
trial_type=trial_type,
generator_run=generator_run,
optimize_for_power=optimize_for_power,
ttl_seconds=ttl_seconds,
)
def get_trials_by_indices(self, trial_indices: Iterable[int]) -> List[BaseTrial]:
"""Grabs trials on this experiment by their indices."""
trial_indices = list(trial_indices)
try:
return [self.trials[idx] for idx in trial_indices]
except KeyError:
missing = set(trial_indices) - set(self.trials)
raise ValueError(
f"Trial indices {missing} are not associated with the experiment."
)
def reset_runners(self, runner: Runner) -> None:
"""Replace all candidate trials runners.
Args:
runner: New runner to replace with.
"""
for trial in self._trials.values():
if trial.status == TrialStatus.CANDIDATE:
trial.runner = runner
self.runner = runner
def _attach_trial(self, trial: BaseTrial, index: Optional[int] = None) -> int:
"""Attach a trial to this experiment.
Should only be called within the trial constructor.
Args:
trial: The trial to be attached.
index: If specified, the trial's index will be set accordingly.
This should generally not be specified, as the index
will be automatically determined based on the number
of existing trials. This is only used for the purpose
of loading from storage.
Returns:
The index of the trial within the experiment's trial list.
"""
if trial.experiment is not self:
raise ValueError("BatchTrial does not belong to this experiment.")
for existing_trial in self._trials.values():
if existing_trial is trial:
raise ValueError("BatchTrial already attached to experiment.")
if index is not None and index in self._trials:
logger.debug( # pragma: no cover
f"Trial index {index} already exists on the experiment. Overwriting."
)
index = (
index
if index is not None
else (0 if len(self._trials) == 0 else max(self._trials.keys()) + 1)
)
self._trials[index] = trial
return index
def _name_and_store_arm_if_not_exists(self, arm: Arm, proposed_name: str) -> None:
"""Tries to lookup arm with same signature, otherwise names and stores it.
- Looks up if arm already exists on experiment
- If so, name the input arm the same as the existing arm
- else name the arm with given name and store in _arms_by_signature
Args:
arm: The arm object to name.
proposed_name: The name to assign if it doesn't have one already.
"""
# If arm is identical to an existing arm, return that
# so that the names match.
if arm.signature in self.arms_by_signature:
existing_arm = self.arms_by_signature[arm.signature]
if arm.has_name:
if arm.name != existing_arm.name:
raise ValueError(
f"Arm already exists with name {existing_arm.name}, "
f"which doesn't match given arm name of {arm.name}."
)
else:
arm.name = existing_arm.name
else:
if not arm.has_name:
arm.name = proposed_name
self._register_arm(arm)
def _register_arm(self, arm: Arm) -> None:
"""Add a new arm to the experiment, updating the relevant
lookup dictionaries.
Args:
arm: Arm to add
"""
self._arms_by_signature[arm.signature] = arm
self._arms_by_name[arm.name] = arm
def _check_TTL_on_running_trials(self) -> None:
"""Checks whether any past-TTL trials are still marked as `RUNNING`
and marks them as failed if so.
NOTE: this function just calls `trial.status` for each trial, as the
computation of that property checks the TTL for trials.
"""
if not self._trials_have_ttl:
return
running = list(self._trial_indices_by_status[TrialStatus.RUNNING])
for idx in running:
self._trials[idx].status # `status` property checks TTL if applicable.
def __repr__(self) -> str:
return self.__class__.__name__ + f"({self._name})"
# --- MultiTypeExperiment convenience functions ---
#
# Certain functionalities have special behavior for multi-type experiments.
# This defines the base behavior for regular experiments that will be
# overridden in the MultiTypeExperiment class.
@property
def default_trial_type(self) -> Optional[str]:
"""Default trial type assigned to trials in this experiment.
In the base experiment class this is always None. For experiments
with multiple trial types, use the MultiTypeExperiment class.
"""
return None
def runner_for_trial(self, trial: BaseTrial) -> Optional[Runner]:
"""The default runner to use for a given trial.
In the base experiment class, this is always the default experiment runner.
For experiments with multiple trial types, use the MultiTypeExperiment class.
"""
return self.runner
def supports_trial_type(self, trial_type: Optional[str]) -> bool:
"""Whether this experiment allows trials of the given type.
The base experiment class only supports None. For experiments
with multiple trial types, use the MultiTypeExperiment class.
"""
return trial_type is None
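# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module).
# It assumes RangeParameter and ParameterType from ax.core.parameter; the
# parameter name "x", its bounds, and the experiment name are placeholders.
if __name__ == "__main__":
    from ax.core.parameter import ParameterType, RangeParameter

    example_space = SearchSpace(
        parameters=[
            RangeParameter(
                name="x", parameter_type=ParameterType.FLOAT, lower=0.0, upper=1.0
            )
        ]
    )
    exp = Experiment(name="example_experiment", search_space=example_space)
    # Trials created with a TTL are marked FAILED once the TTL elapses, the
    # next time `exp.trials` or `exp.trial_indices_by_status` is read.
    trial = exp.new_trial(ttl_seconds=3600)
    print(exp, trial.index)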
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
b21cdd6b34ce845121a1bedbe1cbd7d5fcb287c1
|
2834f98b53d78bafc9f765344ded24cf41ffebb0
|
/weblayer/renderer/DEPS
|
e83c233386cd4f75dd17543c345adb86342e4984
|
[
"BSD-3-Clause"
] |
permissive
|
cea56/chromium
|
81bffdf706df8b356c2e821c1a299f9d4bd4c620
|
013d244f2a747275da76758d2e6240f88c0165dd
|
refs/heads/master
| 2023-01-11T05:44:41.185820
| 2019-12-09T04:14:16
| 2019-12-09T04:14:16
| 226,785,888
| 1
| 0
|
BSD-3-Clause
| 2019-12-09T04:40:07
| 2019-12-09T04:40:07
| null |
UTF-8
|
Python
| false
| false
| 784
|
include_rules = [
# This is needed for error page strings/resources.
# TODO(1024326): If WebLayer stays with WebView's error pages implementation
# long-term, componentize these strings/resources as part of componentizing
# that implementation and remove the need for this dependency.
"+android_webview/grit",
"+components/safe_browsing/common",
"+components/safe_browsing/renderer",
"+components/security_interstitials/content/renderer",
"+components/security_interstitials/core/common",
"+components/spellcheck/renderer",
"+content/public/renderer",
# needed for safebrowsing
"+mojo/public/cpp/bindings",
"+net/base",
"+services/service_manager/public/cpp",
"+third_party/blink/public/common",
"+third_party/blink/public/platform",
"+ui/base",
]
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
f2785e274517171e84c495bf5d95f3995c8dc2d8
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/resources/azure-mgmt-resource/generated_samples/templatespecs/template_spec_versions_list.py
|
c4ccd74ea392cff33ce63cf02c753bf0b0fdf13a
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.resource import TemplateSpecsClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-resource
# USAGE
python template_spec_versions_list.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = TemplateSpecsClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.template_spec_versions.list(
resource_group_name="templateSpecRG",
template_spec_name="simpleTemplateSpec",
)
for item in response:
print(item)
# x-ms-original-file: specification/resources/resource-manager/Microsoft.Resources/stable/2022-02-01/examples/TemplateSpecVersionsList.json
if __name__ == "__main__":
main()
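# Hedged illustration (not part of the generated sample): the environment
# variables listed in the USAGE note can also be provided in-process before
# main() runs; the values below are placeholders.
# import os
# os.environ.setdefault("AZURE_CLIENT_ID", "<client-id>")
# os.environ.setdefault("AZURE_TENANT_ID", "<tenant-id>")
# os.environ.setdefault("AZURE_CLIENT_SECRET", "<client-secret>")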
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
f2635347110c452a2d60ced4e5326758ef7f8b40
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03359/s972946578.py
|
710fd6d1cfa4dd159230bf87a4c4b7c3ef86d7e3
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
a,b=(int(x) for x in input().split())
if a <= b:
count = a
else:
count = a - 1
print(count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4a4f00b6246f6d1d9b4c2c40a515f494e23e5d1f
|
6448c040f56d443673e25068a09ff904df8cb37a
|
/digital_comms/mobile_network/transmitter_module.py
|
4f26e411bb277c982f0ce90ef9e6c91910ef470d
|
[
"MIT"
] |
permissive
|
tg137/digital_comms
|
d6604e09dafad8079a9a2d1231b472457a07aef6
|
2b669833ceaa3d6cb28c820859c1ed5d0a5ce691
|
refs/heads/master
| 2020-05-25T04:46:46.758874
| 2019-05-19T17:04:12
| 2019-05-19T17:04:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 44,777
|
py
|
import os, sys, configparser
import csv
import glob
from rtree import index
import fiona
from shapely.geometry import shape, Point, Polygon, MultiPoint, mapping
from shapely.geometry.polygon import Polygon
from shapely.wkt import loads
from shapely.prepared import prep
import numpy as np
from pyproj import Proj, transform, Geod
from geographiclib.geodesic import Geodesic
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial import Delaunay
from itertools import tee
from collections import OrderedDict
from digital_comms.mobile_network.path_loss_module import path_loss_calculator
#set seed for stochastic predictability
np.random.seed(42)
#Define global simulation parameters
ITERATIONS = 500
TX_HEIGHT_BASE = 30
TX_HEIGHT_HIGH = 40
TX_POWER = 40
TX_GAIN = 20
TX_LOSSES = 2
RX_GAIN = 4
RX_LOSSES = 4
RX_MISC_LOSSES = 4
RX_HEIGHT = 1.5
PERCENTILE = 95
DESIRED_TRANSMITTER_DENSITY = 10 #per km^2
SECTORISATION = 3
SYSTEM_INPUT = os.path.join('data', 'raw')
CONFIG = configparser.ConfigParser()
CONFIG.read(
os.path.join(
os.path.dirname(__file__), '..', '..', 'scripts','script_config.ini'
)
)
BASE_PATH = CONFIG['file_locations']['base_path']
#data locations
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
DATA_RESULTS = os.path.join(BASE_PATH, '..' ,'results', 'system_simulator')
#set numpy seed
np.random.seed(42)
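#NOTE: `pairwise` is referenced further down but is not visible in this
#excerpt; the standard itertools recipe below is assumed as a stand-in.
def pairwise(iterable):
    """Return consecutive overlapping pairs: s -> (s0, s1), (s1, s2), ..."""
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)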
def read_postcode_sector(postcode_sector):
postcode_area = ''.join(
[i for i in postcode_sector[:2] if not i.isdigit()]
)
postcode_area = postcode_area.lower()
with fiona.open(
os.path.join(
DATA_RAW, 'd_shapes', 'postcode_sectors', postcode_area + '.shp')
, 'r') as source:
return [
sector for sector in source \
if sector['properties']['postcode'].replace(
" ", "") == postcode_sector][0]
def get_local_authority_ids(postcode_sector):
with fiona.open(os.path.join(
DATA_RAW, 'd_shapes','lad_uk_2016-12', 'lad_uk_2016-12.shp'),
'r') as source:
postcode_sector_geom = shape(postcode_sector['geometry'])
return [
lad['properties']['name'] for lad in source \
if postcode_sector_geom.intersection(shape(lad['geometry']))
]
def import_area_lut(postcode_sector_name, lad_ids):
for lad in lad_ids:
path = os.path.join(
DATA_RAW, '..', 'intermediate', 'mobile_geotype_lut',
lad, lad + '.csv'
)
with open(path, 'r') as system_file:
reader = csv.DictReader(system_file)
for line in reader:
if line['postcode_sector'].replace(
" ", "") == postcode_sector_name:
lut = {
'postcode_sector': line['postcode_sector'],
'indoor_probability': line['indoor_probability'],
'outdoor_probability': line['outdoor_probability'],
'residential_count': line['residential_count'],
'non_residential_count': line['non_residential_count'],
'estimated_population': int(float(line['residential_count'])*2.5),
'area': line['area'],
}
return lut
def determine_environment(postcode_sector_lut):
population_density = (
postcode_sector_lut['estimated_population'] / float(postcode_sector_lut['area'])
)
if population_density >= 7959:
environment = 'urban'
elif 3119 <= population_density < 7959:
environment = 'suburban'
elif 782 <= population_density < 3119:
environment = 'suburban'
elif 112 <= population_density < 782:
environment = 'rural'
elif 47 <= population_density < 112:
environment = 'rural'
elif 25 <= population_density < 47:
environment = 'rural'
elif population_density < 25:
environment = 'rural'
else:
environment = 'Environment not determined'
raise ValueError('Could not determine environment')
return environment
def get_sites(postcode_sector):
sites = []
geom = shape(postcode_sector['geometry'])
geom_length = geom.length
geom_buffer = geom.buffer(geom_length)
geom_box = geom_buffer.bounds
id_number = 0
with open(
os.path.join(
DATA_INTERMEDIATE, 'sitefinder', 'sitefinder_processed.csv'), 'r'
) as system_file:
reader = csv.DictReader(system_file)
for line in reader:
if (
geom_box[0] <= float(line['longitude']) and
geom_box[1] <= float(line['latitude']) and
geom_box[2] >= float(line['longitude']) and
geom_box[3] >= float(line['latitude'])
):
sites.append({
'type': "Feature",
'geometry': {
"type": "Point",
"coordinates": [
float(line['longitude']),
float(line['latitude'])
]
},
'properties': {
# "operator": line[2],
"sitengr": 'site_id_{}'.format(id_number),
"ant_height": line['Antennaht'],
"tech": line['Transtype'],
"freq": line['Freqband'],
"type": line['Anttype'],
"power": TX_POWER,
# "power_dbw": line['Powerdbw'],
# "max_power_dbw": line['Maxpwrdbw'],
# "max_power_dbm": line['Maxpwrdbm'],
"gain": TX_GAIN,
"losses": TX_LOSSES,
}
})
id_number += 1
else:
pass
return sites
def generate_receivers(postcode_sector, postcode_sector_lut, quantity):
"""
The indoor probability provides a likelihood of a user being indoor,
given the building footprint area and number of floors for all
building stock, in a postcode sector.
Parameters
----------
postcode_sector : polygon
Shape of the area we want to generate receivers within.
postcode_sector_lut : dict
Contains information on indoor and outdoor probability.
quantity: int
Number of receivers we want to generate within the desired area.
Output
------
receivers : List of dicts
Contains the quantity of desired receivers within the area boundary.
"""
indoor_probability = postcode_sector_lut['indoor_probability']
coordinates = []
geom = shape(postcode_sector['geometry'])
geom_box = geom.bounds
minx = geom_box[0]
miny = geom_box[1]
maxx = geom_box[2]
maxy = geom_box[3]
receivers = []
id_number = 0
while len(receivers) < quantity:
x_coord = np.random.uniform(low=minx, high=maxx, size=1)
y_coord = np.random.uniform(low=miny, high=maxy, size=1)
indoor_outdoor_probability = np.random.rand(1,1)[0][0]
coordinates = list(zip(x_coord, y_coord))
# Join the two
postcode_sector_shape = shape(postcode_sector['geometry'])
receiver = Point((x_coord, y_coord))
if postcode_sector_shape.contains(receiver):
receivers.append({
'type': "Feature",
'geometry': {
"type": "Point",
"coordinates": [coordinates[0][0],coordinates[0][1]],
},
'properties': {
'ue_id': "id_{}".format(id_number),
#"sitengr": 'TL4454059600',
"misc_losses": RX_MISC_LOSSES,
"gain": RX_GAIN,
"losses": RX_LOSSES,
"ue_height": float(RX_HEIGHT),
"indoor": (True if float(indoor_outdoor_probability) < \
float(indoor_probability) else False),
}
})
id_number += 1
else:
pass
return receivers
def find_and_deploy_new_site(
existing_sites, new_sites, geojson_postcode_sector, idx):
"""
Given existing site locations, try to deploy a new one in the area
which has the largest existing gap between sites.
Parameters
----------
existing_sites : List of objects
Contains existing sites
new_sites : int
The number of new sites to deploy.
idx : int
The loop index, used for providing the id for a new asset.
geojson_postcode_sector : GeoJson
The postcode sector boundary in GeoJson format.
"""
NEW_TRANSMITTERS = []
for n in range(0, new_sites):
existing_site_coordinates = []
for existing_site in existing_sites.values():
existing_site_coordinates.append(
existing_site.coordinates
)
#convert to numpy array
existing_site_coordinates = np.array(
existing_site_coordinates
)
#get delaunay grid
tri = Delaunay(existing_site_coordinates)
#get coordinates from grid
coord_groups = [tri.points[x] for x in tri.simplices]
#convert coordinate groups to polygons
polygons = [Polygon(x) for x in coord_groups]
#sort based on area
polygons = sorted(polygons, key=lambda x: x.area, reverse=True)
geom = shape(geojson_postcode_sector['geometry'])
#try to allocate using the Delaunay polygon with the largest area first
try:
for new_site_area in polygons:
#get the centroid from the largest area
centroid = new_site_area.centroid
if geom.contains(centroid):
break
else:
continue
x_coord = np.random.uniform(low=minx, high=maxx, size=1)
y_coord = np.random.uniform(low=miny, high=maxy, size=1)
#if no Delaunay polygon centroids are in the area boundary, randomly allocate
except:
geom_box = geom.bounds
minx = geom_box[0]
miny = geom_box[1]
maxx = geom_box[2]
maxy = geom_box[3]
random_site_location = []
while len(random_site_location) == 0:
x_coord = np.random.uniform(low=minx, high=maxx, size=1)
y_coord = np.random.uniform(low=miny, high=maxy, size=1)
receiver = Point((x_coord, y_coord))
if geom.contains(receiver):
centroid = receiver.centroid
random_site_location.append(receiver)
else:
continue
NEW_TRANSMITTERS.append({
'type': "Feature",
'geometry': {
"type": "Point",
"coordinates": [centroid.x, centroid.y]
},
'properties': {
"operator": 'unknown',
"sitengr": "{" + 'new' + "}{GEN" + str(idx) + '.' + str(n+1) + '}',
"ant_height": TX_HEIGHT_BASE,
"tech": 'LTE',
"freq": 700,
"type": 17,
"power": TX_POWER,
"gain": TX_GAIN,
"losses": TX_LOSSES,
}
})
return NEW_TRANSMITTERS
class NetworkManager(object):
def __init__(self, area, sites, receivers):
self.area = {}
self.sites = {}
self.receivers = {}
area_id = area['properties']['postcode']
self.area[area_id] = Area(area)
for site in sites:
site_id = site['properties']["sitengr"]
site = Transmitter(site)
self.sites[site_id] = site
area_containing_sites = self.area[area_id]
area_containing_sites.add_site(site)
for receiver in receivers:
receiver_id = receiver['properties']["ue_id"]
receiver = Receiver(receiver)
self.receivers[receiver_id] = receiver
area_containing_receivers = self.area[area_id]
area_containing_receivers.add_receiver(receiver)
def build_new_assets(self, list_of_new_assets, area_id):
for site in list_of_new_assets:
site_id = site['properties']["sitengr"]
site = Transmitter(site)
self.sites[site_id] = site
for area_containing_sites in self.area.values():
if area_containing_sites.id == area_id:
area_containing_sites.add_site(site)
def estimate_link_budget(
self, frequency, bandwidth, generation, mast_height,
environment, modulation_and_coding_lut):
"""
Takes propagation parameters and calculates link budget capacity.
Parameters
----------
frequency : float
The carrier frequency for the chosen spectrum band (GHz).
bandwidth : float
The width of the spectrum around the carrier frequency (MHz).
environment : string
Either urban, suburban or rural.
modulation_and_coding_lut : list of tuples
A lookup table containing modulation and coding rates,
spectral efficiencies and SINR estimates.
Returns
-------
sinr : float
The signal to noise plus interference ratio (SINR).
capacity_mbps : float
The estimated link budget capacity.
"""
results = []
for receiver in self.receivers.values():
# print(receiver.id)
closest_site, interfering_sites = (
self.find_closest_available_sites(receiver)
)
# print('closest_site is {}'.format(closest_site))
# print('interfering_sites is {}'.format(interfering_sites))
path_loss = self.calculate_path_loss(
closest_site, receiver, frequency, mast_height, environment
)
received_power = self.calc_received_power(
closest_site, receiver, path_loss
)
interference = self.calculate_interference(
interfering_sites, receiver, frequency, environment)
noise = self.calculate_noise(
bandwidth
)
sinr = self.calculate_sinr(
received_power, interference, noise
)
spectral_efficiency = self.modulation_scheme_and_coding_rate(
sinr, generation, modulation_and_coding_lut
)
estimated_capacity = self.link_budget_capacity(
bandwidth, spectral_efficiency
)
data = {
'spectral_efficiency': spectral_efficiency,
'sinr': sinr,
'capacity_mbps': estimated_capacity
}
results.append(data)
# print('received_power is {}'.format(received_power))
# print('interference is {}'.format(interference))
# print('noise is {}'.format(noise))
# print('sinr is {}'.format(sinr))
# print('spectral_efficiency is {}'.format(spectral_efficiency))
# print('estimated_capacity is {}'.format(estimated_capacity))
# print('path_loss is {}'.format(path_loss))
# print('-----------------------------')
return results
def find_closest_available_sites(self, receiver):
"""
Returns a list of all sites, ranked based on proximity
to the receiver.
"""
idx = index.Index()
for site in self.sites.values():
idx.insert(0, Point(site.coordinates).bounds, site)
number_of_sites = len(self.sites.values())
all_closest_sites = list(
idx.nearest(
Point(receiver.coordinates).bounds,
number_of_sites, objects='raw')
)
closest_site = all_closest_sites[0]
interfering_sites = all_closest_sites[1:4]
return closest_site, interfering_sites
def calculate_path_loss(self, closest_site, receiver,
frequency, mast_height, environment):
# for area in self.area.values():
# local_authority_ids = area.local_authority_ids
x2_receiver = receiver.coordinates[0]
y2_receiver = receiver.coordinates[1]
x1_site, y1_site = transform_coordinates(
Proj(init='epsg:27700'), Proj(init='epsg:4326'),
closest_site.coordinates[0],
closest_site.coordinates[1],
)
x2_receiver, y2_receiver = transform_coordinates(
Proj(init='epsg:27700'), Proj(init='epsg:4326'),
receiver.coordinates[0],
receiver.coordinates[1],
)
Geo = Geodesic.WGS84
i_strt_distance = Geo.Inverse(
y1_site, x1_site, y2_receiver, x2_receiver
)
interference_strt_distance = round(i_strt_distance['s12'],0)
ant_height = mast_height
ant_type = 'macro'
# type_of_sight, building_height, street_width = built_environment_module(
# site_geom, receiver_geom
if interference_strt_distance < 250 :
type_of_sight = 'los'
else:
type_of_sight = 'nlos'
building_height = 20
street_width = 20
above_roof = 0
location = receiver.indoor
path_loss = path_loss_calculator(
frequency,
interference_strt_distance,
ant_height,
ant_type,
building_height,
street_width,
environment,
type_of_sight,
receiver.ue_height,
above_roof,
location
)
return path_loss
def calc_received_power(self, site, receiver, path_loss):
"""
Calculate received power based on site and receiver
characteristics, and path loss.
Equivalent Isotropically Radiated Power (EIRP) = Power + Gain - Losses
"""
#calculate Equivalent Isotropically Radiated Power (EIRP)
eirp = float(site.power) + \
float(site.gain) - \
float(site.losses)
received_power = eirp - \
path_loss - \
receiver.misc_losses + \
receiver.gain - \
receiver.losses
# print('received power is {}'.format(received_power))
return received_power
def calculate_interference(
self, closest_sites, receiver, frequency, environment):
"""
Calculate interference from other cells.
closest_sites contains all sites, ranked based
on distance, meaning we need to select cells 1-3 (as cell 0
is the actual cell in use)
"""
interference = []
x1_receiver, y1_receiver = transform_coordinates(
Proj(init='epsg:27700'),
Proj(init='epsg:4326'),
receiver.coordinates[0],
receiver.coordinates[1]
)
#calculate interference from other power sources
for interference_site in closest_sites:
#get distance
x2_interference = interference_site.coordinates[0]
y2_interference = interference_site.coordinates[1]
x2_interference, y2_interference = transform_coordinates(
Proj(init='epsg:27700'),
Proj(init='epsg:4326'),
interference_site.coordinates[0],
interference_site.coordinates[1]
)
Geo = Geodesic.WGS84
i_strt_distance = Geo.Inverse(
y2_interference,
x2_interference,
y1_receiver,
x1_receiver,
)
interference_strt_distance = int(
round(i_strt_distance['s12'], 0)
)
ant_height = 20
ant_type = 'macro'
building_height = 20
street_width = 20
type_of_sight = randomly_select_los()
above_roof = 0
indoor = receiver.indoor
path_loss = path_loss_calculator(
frequency,
interference_strt_distance,
ant_height,
ant_type,
building_height,
street_width,
environment,
type_of_sight,
receiver.ue_height,
above_roof,
indoor,
)
# print('path loss for {} to {} is {}'.format(
# receiver.id, interference_site.id, path_loss)
# )
#calc interference from other cells
received_interference = self.calc_received_power(
interference_site,
receiver,
path_loss
)
#add cell interference to list
interference.append(received_interference)
return interference
def calculate_noise(self, bandwidth):
#TODO
"""
Terminal noise can be calculated as:
“K (Boltzmann constant) x T (290K) x bandwidth”.
The bandwidth depends on bit rate, which defines the number of resource blocks.
We assume 50 resource blocks, equal to 9 MHz, of transmission for a 1 Mbps downlink.
Required SNR (dB)
Detection bandwidth (BW) (Hz)
k = Boltzmann constant
T = Temperature (kelvins) (290 kelvin = ~17 celsius)
NF = Receiver noise figure
NoiseFloor (dBm) = 10log10(k*T*1000)+NF+10log10BW
NoiseFloor (dBm) = 10log10(1.38x10e-23*290*1x10e3)+1.5+10log10(10x10e6)
"""
k = 1.38e-23
t = 290
BW = bandwidth*1000000
noise = 10*np.log10(k*t*1000)+1.5+10*np.log10(BW)
return noise
def calculate_sinr(self, received_power, interference, noise):
"""
Calculate the Signal-to-Interference-plus-Noise Ratio (SINR).
"""
#convert dBm values to linear milliwatts before combining
raw_received_power = 10**(received_power / 10)
interference_values = []
for value in interference:
output_value = 10**(value / 10)
interference_values.append(output_value)
raw_sum_of_interference = sum(interference_values)
raw_noise = 10**(noise / 10)
#SINR expressed in dB
sinr = 10 * np.log10(
raw_received_power / (raw_sum_of_interference + raw_noise)
)
return round(sinr, 2)
def modulation_scheme_and_coding_rate(self, sinr,
generation, modulation_and_coding_lut):
"""
Uses the SINR to allocate a modulation scheme and affiliated
coding rate.
"""
spectral_efficiency = 0
for lower, upper in pairwise(modulation_and_coding_lut):
if lower[0] == generation and upper[0] == generation:
lower_sinr = lower[5]
upper_sinr = upper[5]
if sinr >= lower_sinr and sinr < upper_sinr:
spectral_efficiency = lower[4]
break
return spectral_efficiency
def link_budget_capacity(self, bandwidth, spectral_efficiency):
"""
Estimate wireless link capacity (Mbps) based on bandwidth and
receiver signal.
capacity (Mbps) = bandwidth (MHz) * log2(1 + SINR)
"""
#estimated_capacity = round(bandwidth*np.log2(1+sinr), 2)
bandwidth_in_hertz = bandwidth*1000000
link_budget_capacity = bandwidth_in_hertz*spectral_efficiency
link_budget_capacity_mbps = link_budget_capacity / 1000000
return link_budget_capacity_mbps
def find_sites_in_area(self):
if not self.sites:
return 0
area_geometry = ([(d.geometry) for d in self.area.values()][0])
idx = index.Index()
for site in self.sites.values():
idx.insert(0, Point(site.coordinates).bounds, site)
sites_in_area = []
for n in idx.intersection(shape(area_geometry).bounds, objects=True):
point = Point(n.object.coordinates)
if shape(area_geometry).contains(point):
sites_in_area.append(n.object)
return sites_in_area
def site_density(self):
"""
Calculate site density per square kilometer (km^2)
Returns
-------
obj
Site density (sites per km^2)
Notes
-----
Function returns `0` when no sites are configured to the area.
"""
if not self.sites:
return 0
sites_in_area = self.find_sites_in_area()
postcode_sector_area = (
[round(a.area) for a in self.area.values()]
)[0]
site_density = (
len(sites_in_area) / (postcode_sector_area/1000000)
)
return site_density
def receiver_density(self):
"""
Calculate receiver density per square kilometer (km^2)
Returns
-------
obj
Receiver density (receivers per km^2)
Notes
-----
Function returns `0` when no receivers are configured to the area.
"""
if not self.receivers:
return 0
postcode_sector_area = (
[round(a.area) for a in self.area.values()]
)[0]
receiver_density = (
len(self.receivers) / (postcode_sector_area/1000000)
)
return receiver_density
def energy_consumption(self, cells_per_site):
"""
Gets the energy consumption (watts) of the sites in the area.
Parameters
----------
cells_per_site : int
Number of cells (sectors) deployed at each site.
Notes
-----
total_power_dbm holds the dBm per cell for each site,
watts_for_1_cell_per_site the corresponding watts per cell, and
total_power_watts the summed watts for all cells in use.
"""
if not self.area:
return 0
sites_in_area = self.find_sites_in_area()
# print('number of sites_in_area {}'.format(len(sites_in_area)))
total_power_dbm = [round(a.power) for a in sites_in_area]
watts_per_area = []
for value in total_power_dbm:
watts_for_1_cell_per_site = 1 * 10**(value / 10) / 1000
watts_per_site = watts_for_1_cell_per_site * cells_per_site
watts_per_area.append(watts_per_site)
total_power_watts = sum(watts_per_area)
# print('total_power_watts {}'.format(total_power_watts/1000000))
return total_power_watts
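# Worked example (hypothetical site): a cell transmitting at 40 dBm is
# 10**(40/10)/1000 = 10 W, so with cells_per_site = 3 a site contributes roughly
# 30 W of radiated power under this simple model (amplifier and baseband excluded).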
class Area(object):
"""
The geographic area which holds all sites and receivers.
"""
def __init__(self, data):
#id and geographic info
self.id = data['properties']['postcode']
self.local_authority_ids = data['properties']['local_authority_ids']
self.geometry = data['geometry']
self.coordinates = data['geometry']['coordinates']
self.area = self._calculate_area(data)
#connections
self._sites = {}
self._receivers = {}
def _calculate_area(self, data):
polygon = shape(data['geometry'])
area = polygon.area
return area
def add_site(self, site):
self._sites[site.id] = site
def add_receiver(self, receiver):
self._receivers[receiver.id] = receiver
class Transmitter(object):
"""
A Transmitter object represents a specific cell site.
"""
def __init__(self, data):
#id and geographic info
self.id = data['properties']['sitengr']
self.coordinates = data['geometry']['coordinates']
self.geometry = data['geometry']
#antenna properties
self.ant_type = 'macro'
self.ant_height = TX_HEIGHT_BASE
self.power = TX_POWER
self.gain = TX_GAIN
self.losses = TX_LOSSES
def __repr__(self):
return "<Transmitter id:{}>".format(self.id)
class Receiver(object):
"""
A receiver object is a piece of user equipment which can
connect to a site.
"""
def __init__(self, data):
#id and geographic info
self.id = data['properties']['ue_id']
#self.site_id = data['properties']['sitengr']
self.coordinates = data['geometry']["coordinates"]
#parameters
self.misc_losses = data['properties']['misc_losses']
self.gain = data['properties']['gain']
self.losses = data['properties']['losses']
self.ue_height = data['properties']['ue_height']
self.indoor = data['properties']['indoor']
def __repr__(self):
return "<Receiver id:{}>".format(self.id)
def randomly_select_los():
np.random.seed(42) #note: re-seeding on every call makes the outcome deterministic (always the same value)
number = round(np.random.rand(1,1)[0][0], 2)
if number > 0.5:
los = 'los'
else:
los = 'nlos'
return los
def transform_coordinates(old_proj, new_proj, x, y):
new_x, new_y = transform(old_proj, new_proj, x, y)
return new_x, new_y
def obtain_threshold_values(results, percentile):
"""
Get the threshold capacity based on a given percentile.
"""
spectral_efficency = []
sinr = []
threshold_capacity_value = []
for result in results:
spectral_efficency.append(result['spectral_efficiency'])
sinr.append(result['sinr'])
threshold_capacity_value.append(result['capacity_mbps'])
spectral_efficency = np.percentile(spectral_efficency, percentile)
sinr = np.percentile(sinr, percentile)
capacity_mbps = np.percentile(threshold_capacity_value, percentile)
return spectral_efficency, sinr, capacity_mbps
def pairwise(iterable):
"""
Return iterable of 2-tuples in a sliding window
Parameters
----------
iterable: list
Sliding window
Returns
-------
list of tuple
Iterable of 2-tuples
Example
-------
>>> list(pairwise([1,2,3,4]))
[(1,2),(2,3),(3,4)]
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def calculate_network_efficiency(spectral_efficency, energy_consumption):
if spectral_efficency == 0 or energy_consumption == 0:
network_efficiency = 0
else:
network_efficiency = (
float(spectral_efficency) / float(energy_consumption)
)
return network_efficiency
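# Worked example (illustrative figures): a cell-edge spectral efficiency of
# 2.73 bps/Hz with a total area power draw of 30 W gives a network efficiency
# of 2.73 / 30 ~ 0.09 bps/Hz per watt under this definition.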
def write_results(results, frequency, bandwidth, site_density,
r_density, postcode_sector_name):
suffix = 'freq_{}_bandwidth_{}_density_{}'.format(
frequency, bandwidth, site_density
)
directory = os.path.join(DATA_RESULTS, postcode_sector_name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = '{}.csv'.format(suffix)
directory = os.path.join(directory, filename)
if not os.path.exists(directory):
results_file = open(directory, 'w', newline='')
results_writer = csv.writer(results_file)
results_writer.writerow(
('frequency','bandwidth','site_density','r_density',
'sinr','throughput')
)
else:
results_file = open(directory, 'a', newline='')
results_writer = csv.writer(results_file)
# output and report results for this timestep
for result in results:
# Output metrics
results_writer.writerow(
(frequency,
bandwidth,
site_density,
r_density,
result['sinr'],
result['estimated_capacity'])
)
results_file.close()
def write_lookup_table(
cell_edge_spectral_efficency, cell_edge_sinr, area_capacity_mbps,
network_efficiency, environment, operator, technology,frequency,
bandwidth, mast_height, area_site_density, area_isd, postcode_sector_name):
suffix = 'lookup_table_{}'.format(postcode_sector_name)
directory = os.path.join(DATA_RESULTS, postcode_sector_name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = '{}.csv'.format(suffix)
directory = os.path.join(directory, filename)
if not os.path.exists(directory):
lut_file = open(directory, 'w', newline='')
lut_writer = csv.writer(lut_file)
lut_writer.writerow(
('environment', 'operator', 'technology',
'frequency', 'bandwidth', 'mast_height',
'area_site_density', 'area_isd',
'cell_edge_spectral_efficency', 'cell_edge_sinr',
'area_capacity_mbps', 'network_efficiency')
)
else:
lut_file = open(directory, 'a', newline='')
lut_writer = csv.writer(lut_file)
# output and report results for this timestep
lut_writer.writerow(
(environment,
operator,
technology,
frequency,
bandwidth,
mast_height,
area_site_density,
area_isd,
cell_edge_spectral_efficency,
cell_edge_sinr,
area_capacity_mbps,
network_efficiency)
)
lut_file.close()
def write_shapefile(data, postcode_sector_name, filename):
# Translate props to Fiona sink schema
prop_schema = []
for name, value in data[0]['properties'].items():
fiona_prop_type = next((
fiona_type for fiona_type, python_type in \
fiona.FIELD_TYPES_MAP.items() if \
python_type == type(value)), None
)
prop_schema.append((name, fiona_prop_type))
sink_driver = 'ESRI Shapefile'
sink_crs = {'init': 'epsg:27700'}
sink_schema = {
'geometry': data[0]['geometry']['type'],
'properties': OrderedDict(prop_schema)
}
# Create path
directory = os.path.join(DATA_RESULTS, postcode_sector_name)
if not os.path.exists(directory):
os.makedirs(directory)
print(os.path.join(directory, filename))
# Write all elements to output file
with fiona.open(
os.path.join(directory, filename), 'w',
driver=sink_driver, crs=sink_crs, schema=sink_schema) as sink:
for feature in data:
sink.write(feature)
def format_data(existing_data, new_data, frequency, bandwidth,
postcode_sector_name):
for datum in new_data:
existing_data.append({
'frequency': frequency,
'bandwidth': bandwidth,
'sinr': datum['sinr'],
'capacity': datum['estimated_capacity']
})
return existing_data
#####################################
# VISUALISE NETWORK STATS
#####################################
def plot_data(data, frequency, bandwidth, postcode_sector_name):
sinr = []
capacity = []
for datum in data:
sinr.append(datum['sinr'])
capacity.append(datum['estimated_capacity'])
plt.figure()
plt.scatter(sinr, capacity)
plt.xlabel("SINR")
plt.ylabel("Capacity (Mbps)")
plt.legend(loc='upper left')
plt.axis((0,30,0,150))
# Create path
directory = os.path.join(DATA_RESULTS, postcode_sector_name, 'plots')
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(os.path.join(
directory, 'freq_{}_bw_{}.png'.format(frequency, bandwidth)
))
def joint_plot(data, postcode_sector_name):
sinr_700_10 = []
sinr_800_10 = []
sinr_900_10 = []
sinr_1800_10 = []
sinr_2100_10 = []
sinr_2600_10 = []
capacity_700_10 = []
capacity_800_10 = []
capacity_900_10 = []
capacity_1800_10 = []
capacity_2100_10 = []
capacity_2600_10 = []
for datum in data:
if datum['frequency'] == 0.7 and datum['bandwidth'] == 10:
sinr_700_10.append(datum['sinr'])
capacity_700_10.append(datum['capacity'])
if datum['frequency'] == 0.8 and datum['bandwidth'] == 10:
sinr_800_10.append(datum['sinr'])
capacity_800_10.append(datum['capacity'])
if datum['frequency'] == 0.9 and datum['bandwidth'] == 10:
sinr_900_10.append(datum['sinr'])
capacity_900_10.append(datum['capacity'])
if datum['frequency'] == 1.8 and datum['bandwidth'] == 10:
sinr_1800_10.append(datum['sinr'])
capacity_1800_10.append(datum['capacity'])
if datum['frequency'] == 2.1 and datum['bandwidth'] == 10:
sinr_2100_10.append(datum['sinr'])
capacity_2100_10.append(datum['capacity'])
if datum['frequency'] == 2.6 and datum['bandwidth'] == 10:
sinr_2600_10.append(datum['sinr'])
capacity_2600_10.append(datum['capacity'])
#setup and plot
plt.scatter(sinr_700_10, capacity_700_10, label='10MHz@700MHz')
plt.scatter(sinr_800_10, capacity_800_10, label='10MHz@800MHz')
plt.scatter(sinr_900_10, capacity_900_10, label='10MHz@900MHz')
plt.scatter(sinr_1800_10, capacity_1800_10, label='10MHz@1800MHz')
plt.scatter(sinr_2100_10, capacity_2100_10, label='10MHz@2100MHz')
plt.scatter(sinr_2600_10, capacity_2600_10, label='10MHz@2600MHz')
plt.xlabel("SINR")
plt.ylabel("Capacity (Mbps)")
plt.legend(loc='upper left')
# Create path
directory = os.path.join(DATA_RESULTS, postcode_sector_name, 'plots')
if not os.path.exists(directory):
os.makedirs(directory)
plt.savefig(os.path.join(directory, 'panel_plot.png'))
#####################################
# APPLY METHODS
#####################################
SPECTRUM_PORTFOLIO = [
('generic', 'FDD DL', 0.7, 10, '5G'),
('generic', 'FDD DL', 0.8, 10, '4G'),
('generic', 'FDD DL', 2.6, 10, '4G'),
('generic', 'FDD DL', 3.5, 80, '5G'),
]
MAST_HEIGHT = [
(30),
(40)
]
MODULATION_AND_CODING_LUT =[
# Columns: generation, CQI index, modulation, coding rate (x1024 in the 5G rows),
# spectral efficiency (bps/Hz), SINR estimate (dB)
('4G', 1, 'QPSK', 0.0762, 0.1523, -6.7),
('4G', 2, 'QPSK', 0.1172, 0.2344, -4.7),
('4G', 3, 'QPSK', 0.1885, 0.377, -2.3),
('4G', 4, 'QPSK', 0.3008, 0.6016, 0.2),
('4G', 5, 'QPSK', 0.4385, 0.877, 2.4),
('4G', 6, 'QPSK', 0.5879, 1.1758, 4.3),
('4G', 7, '16QAM', 0.3691, 1.4766, 5.9),
('4G', 8, '16QAM', 0.4785, 1.9141, 8.1),
('4G', 9, '16QAM', 0.6016, 2.4063, 10.3),
('4G', 10, '64QAM', 0.4551, 2.7305, 11.7),
('4G', 11, '64QAM', 0.5537, 3.3223, 14.1),
('4G', 12, '64QAM', 0.6504, 3.9023, 16.3),
('4G', 13, '64QAM', 0.7539, 4.5234, 18.7),
('4G', 14, '64QAM', 0.8525, 5.1152, 21),
('4G', 15, '64QAM', 0.9258, 5.5547, 22.7),
('5G', 1, 'QPSK', 78, 0.1523, -6.7),
('5G', 2, 'QPSK', 193, 0.377, -4.7),
('5G', 3, 'QPSK', 449, 0.877, -2.3),
('5G', 4, '16QAM', 378, 1.4766, 0.2),
('5G', 5, '16QAM', 490, 1.9141, 2.4),
('5G', 6, '16QAM', 616, 2.4063, 4.3),
('5G', 7, '64QAM', 466, 2.7305, 5.9),
('5G', 8, '64QAM', 567, 3.3223, 8.1),
('5G', 9, '64QAM', 666, 3.9023, 10.3),
('5G', 10, '64QAM', 772, 4.5234, 11.7),
('5G', 11, '64QAM', 873, 5.1152, 14.1),
('5G', 12, '256QAM', 711, 5.5547, 16.3),
('5G', 13, '256QAM', 797, 6.2266, 18.7),
('5G', 14, '256QAM', 885, 6.9141, 21),
('5G', 15, '256QAM', 948, 7.4063, 22.7),
]
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Error: no postcode sector provided")
#print("Usage: {} <postcode>".format(os.path.basename(__file__)))
exit(-1)
print('Process ' + sys.argv[1])
postcode_sector_name = sys.argv[1]
postcode_sector_abbr = sys.argv[1].replace('postcode_sector_', '')
#get postcode sector
geojson_postcode_sector = read_postcode_sector(postcode_sector_name)
#get local authority district
local_authority_ids = get_local_authority_ids(geojson_postcode_sector)
#add lad information to postcode sectors
geojson_postcode_sector['properties']['local_authority_ids'] = (
local_authority_ids
)
#get the probability for inside versus outside calls
postcode_sector_lut = import_area_lut(
postcode_sector_name, local_authority_ids
)
#get propagation environment (urban, suburban or rural)
environment = determine_environment(postcode_sector_lut)
#get list of sites
TRANSMITTERS = get_sites(geojson_postcode_sector)
# {'operator': 'O2', 'sitengr': 'TL4491058710', 'ant_height': '5',
# 'tech': 'GSM', 'freq': '900', 'type': '3.2', 'power': 30,
# 'gain': 18, 'losses': 2}
#generate receivers
RECEIVERS = generate_receivers(
geojson_postcode_sector,
postcode_sector_lut,
ITERATIONS
)
idx = 0
for mast_height in MAST_HEIGHT:
for operator, technology, frequency, bandwidth, generation in SPECTRUM_PORTFOLIO:
#load system model with data
MANAGER = NetworkManager(
geojson_postcode_sector, TRANSMITTERS, RECEIVERS
)
#calculate site density
starting_site_density = MANAGER.site_density()
# print('starting_site_density {}'.format(starting_site_density))
# my_range = np.linspace(
# starting_site_density, DESIRED_TRANSMITTER_DENSITY, 10
# )
# site_densities = list(set([round(x) for x in my_range]))
site_densities = [starting_site_density, 10]
postcode_sector_object = [a for a in MANAGER.area.values()][0]
postcode_sector_area = postcode_sector_object.area/1e6
idx = 0
for site_density in site_densities:
print("{} GHz {}m Height {} Density".format(
frequency, mast_height, round(site_density, 4)
))
current_site_density = MANAGER.site_density()
number_of_new_sites = int(
(site_density - current_site_density) * postcode_sector_area
)
print('number_of_new_sites {}'.format(number_of_new_sites))
NEW_TRANSMITTERS = find_and_deploy_new_site(
MANAGER.sites, number_of_new_sites,
geojson_postcode_sector, idx
)
MANAGER.build_new_assets(
NEW_TRANSMITTERS, geojson_postcode_sector
)
results = MANAGER.estimate_link_budget(
frequency, bandwidth, generation, mast_height,
environment, MODULATION_AND_CODING_LUT
)
site_density = MANAGER.site_density()
# print('site_density is {}'.format(site_density))
isd = 'tbc'
r_density = MANAGER.receiver_density()
# write_results(results, frequency, bandwidth, site_density,
# r_density, postcode_sector_name
# )
#find percentile values
spectral_efficency, sinr, capacity_mbps = (
obtain_threshold_values(results, PERCENTILE)
)
network_efficiency = calculate_network_efficiency(
spectral_efficency,
MANAGER.energy_consumption(SECTORISATION)
)
area_capacity_mbps = capacity_mbps * SECTORISATION
# print('spectral_efficency is {}'.format(spectral_efficency))
# print('sinr is {}'.format(sinr))
# print('capacity_mbps is {}'.format(capacity_mbps))
#env, frequency, bandwidth, site_density, capacity
write_lookup_table(
spectral_efficency, sinr, area_capacity_mbps,
network_efficiency, environment, operator, technology,
frequency, bandwidth, mast_height, site_density, isd,
postcode_sector_name
)
idx += 1
#print('------------------------------------')
# # print('write buildings')
# # write_shapefile(buildings, postcode_sector_name, 'buildings.shp')
# # print('write receivers')
# # write_shapefile(RECEIVERS, postcode_sector_name, 'receivers.shp')
# print('write sites')
# write_shapefile(TRANSMITTERS, postcode_sector_name, 'sites.shp')
# print('write boundary')
# geojson_postcode_sector_list = []
# geojson_postcode_sector_list.append(geojson_postcode_sector)
# write_shapefile(
# geojson_postcode_sector_list, postcode_sector_name, '_boundary.shp'
# )
|
[
"edward.oughton@gmail.com"
] |
edward.oughton@gmail.com
|
352bab04b5f300ec488b2c2277b3fcce8aa5430d
|
0fd92b7d882a1edb5542f6600bb177dcad67ed50
|
/powerful104/2476.py
|
59884bdebe88950cc45e5841b1b8de057f17569f
|
[] |
no_license
|
alpha-kwhn/Baekjun
|
bce71fdfbbc8302ec254db5901109087168801ed
|
f8b4136130995dab78f34e84dfa18736e95c8b55
|
refs/heads/main
| 2023-08-02T11:11:19.482020
| 2021-03-09T05:34:01
| 2021-03-09T05:34:01
| 358,347,708
| 0
| 0
| null | 2021-04-15T17:56:14
| 2021-04-15T17:56:13
| null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
ans=0
for _ in range(int(input())):
a, b, c= map(int, input().split())
prize=0
if a==b==c:
prize=10000+a*1000
elif a==b or a==c:
prize=1000+a*100
elif b==c:
prize=1000+b*100
else:
prize=max(a,b,c)*100
if ans<prize:
ans=prize
print(ans)
|
[
"noreply@github.com"
] |
alpha-kwhn.noreply@github.com
|
b3a89023ea3508c3e11a114dd212533d8cafa3d2
|
c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5
|
/ml/m30_pca2_5_diabetes_RF.py
|
cf6c7c7cf6d4e1510dfd2fa544ccd37999038961
|
[] |
no_license
|
89Mansions/AI_STUDY
|
d9f8bdf206f14ba41845a082e731ea844d3d9007
|
d87c93355c949c462f96e85e8d0e186b0ce49c76
|
refs/heads/master
| 2023-07-21T19:11:23.539693
| 2021-08-30T08:18:59
| 2021-08-30T08:18:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
# PCA: dimensionality reduction, reconstructing the feature columns
# Modelling with RandomForest
import numpy as np
from sklearn.datasets import load_diabetes
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.pipeline import Pipeline
from xgboost import XGBRegressor
#1. DATA
datasets = load_diabetes()
x = datasets.data
y = datasets.target
# print(x.shape, y.shape) # (442, 10) (442,)
pca = PCA(n_components=8)
x2 = pca.fit_transform(x) # fit_transform: runs the preprocessing fit and transform in a single step
x_train, x_test, y_train, y_test = train_test_split(x2, y, train_size=0.8, shuffle=True, random_state=46)
print(x_train.shape) # (353, 8) >> columns compressed and reconstructed by PCA
print(x_test.shape) # (89, 8) >> columns compressed and reconstructed by PCA
# pca = PCA()
# pca.fit(x)
# cumsum = np.cumsum(pca.explained_variance_ratio_)
# print("cumsum : ", cumsum) # cumsum 누적 합을 계산
# cumsum : [0.40242142 0.55165324 0.67224947 0.76779711 0.83401567 0.89428759
# 0.94794364 0.99131196 0.99914395 1. ]
# d = np.argmax(cumsum >= 0.95)+1
# print("cumsum >= 0.95", cumsum > 0.95)
# print("d : ", d)
# cumsum >= 0.95 [False False False False False False False True True True]
# d : 8
# import matplotlib.pyplot as plt
# plt.plot(cumsum)
# plt.grid()
# plt.show()
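# A minimal alternative sketch (relies on scikit-learn's fractional n_components API):
# passing a fraction keeps just enough components to reach that share of variance,
# which here selects the same 8 components as the manual cumsum search above.
# pca = PCA(n_components=0.95)
# x2 = pca.fit_transform(x)
# print(pca.n_components_) # -> 8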
#2. Modeling
# model = Pipeline([("scaler", MinMaxScaler()), ("model", RandomForestRegressor())]) # first candidate; overridden by the line below
model = Pipeline([("scaler", MinMaxScaler()), ("model", XGBRegressor())])
#3. Train
model.fit(x_train, y_train)
#4. Score, Predict
result = model.score(x_test, y_test)
print("model.score : ", result)
y_pred = model.predict(x_test)
score = r2_score(y_test, y_pred) # r2_score expects (y_true, y_pred); the arguments were reversed in the original call
print("r2_score : ", score)
# RandomForestRegressor
# model.score : 0.43512635590690074
# r2_score : -0.5421970924222612
# XGBoost
# model.score : 0.3449642489091771
# r2_score : -0.3388132027144872
|
[
"hwangkei0212@gmail.com"
] |
hwangkei0212@gmail.com
|
9db9821260783c8ab2205fe0109af946caaa20e8
|
3806db5b4bb7a638f30c818a29ccaf2b0ddb2836
|
/test_141.py
|
47edc016610b2369996717a30fdc8799c917f569
|
[] |
no_license
|
EomAA/fenics-qa
|
d0a687a7b84c51417e96eeeef9855c0d4ba27dea
|
c37a36a14450d0e7f6432c4726c5d96e0d6c4e96
|
refs/heads/master
| 2021-12-15T12:07:10.316478
| 2017-08-18T09:16:01
| 2017-08-18T09:16:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
import dolfin
mesh = dolfin.UnitSquareMesh(1,1)
dX = dolfin.dx(mesh)
fe = dolfin.FiniteElement(
family="Quadrature",
cell=mesh.ufl_cell(),
degree=1,
quad_scheme="default")
cppExprCode='''
namespace dolfin
{
class CppExpr : public Expression
{
public:
CppExpr(): Expression(0)
{
}
void eval(Array<double>& values, const Array<double>& position) const
{
std::cout << "position = " << position << std::endl;
values[0] = 1.;
std::cout << "values = " << values << std::endl;
}
};
}'''
cppExpr = dolfin.Expression(cppExprCode, element=fe)
dolfin.assemble(cppExpr * dX)
|
[
"miroslav.kuchta@gmail.com"
] |
miroslav.kuchta@gmail.com
|
571ad1ed92f668725894fda736f6a256d49267df
|
c4f01eec090833762b884c2078161df087d09b0d
|
/Design of software systems/Лаба 6/Scrapyard_Python/main.py
|
0f4823e7f1f4c5f38b15f88f0869b4765e3609a9
|
[] |
no_license
|
areyykarthik/Zhukouski_Pavel_BSU_Projects
|
47a30144c5614b10af521a78fba538a0e9184efa
|
3540979e680732d38e25a6b39f09338985de6743
|
refs/heads/master
| 2023-08-07T02:49:34.736155
| 2021-10-05T21:57:03
| 2021-10-05T21:57:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,386
|
py
|
from tkinter import *
from tkinter import messagebox
import sqlite3
CurrentID = 1
CurrentName = ""
CurrentPrice = 0.0
CurrentAmount = 0
CurrentGood = [0, "", 0.0, 0]
CurrentOrder = []
Window_SignIn = Tk()
Window_SignIn.withdraw()
Window_SignUp = Toplevel(Window_SignIn)
Window_SignUp.withdraw()
Window_OnlineShop = Toplevel(Window_SignUp)
Window_OnlineShop.withdraw()
Window_AccountInfo = Toplevel(Window_OnlineShop)
Window_AccountInfo.withdraw()
Window_PreparingOrder = Toplevel(Window_AccountInfo)
Window_PreparingOrder.withdraw()
SignIn_Email = StringVar()
SignIn_Password = StringVar()
SignUp_Email = StringVar()
SignUp_Password = StringVar()
SignUp_PasswordConfirm = StringVar()
AccountInfo_NewEmail = StringVar()
AccountInfo_NewPassword = StringVar()
HowManyWasTaken = StringVar()
MobilePhone = StringVar()
# --- DATABASE BLOCK ---
conn = sqlite3.connect("DataBase.db") # Connect to the database named DataBase.db
cursor = conn.cursor()
# --- DATABASE BLOCK (end) ---
# --- GUI BLOCK ---
def SignIn_ButtonToSignUp_Clicked():
Window_SignIn.withdraw()
Window_SignUp_PowerOn()
def SignIn_ButtonConfirm_Clicked():
sql1 = f"SELECT * FROM ACCOUNTS WHERE email LIKE '{str(SignIn_Email.get())}'"
cursor.execute(sql1)
if cursor.fetchall():
sql2 = f"SELECT password FROM ACCOUNTS WHERE email LIKE '{str(SignIn_Email.get())}'"
cursor.execute(sql2)
if str(SignIn_Password.get()) == cursor.fetchall()[0][0]:
Window_SignIn.withdraw()
Window_OnlineShop_PowerOn()
else:
messagebox.showinfo("Online Shop - Sign In", f"Incorrect password for this account!")
else:
messagebox.showinfo("Online Shop - Sign In", f"No email like {str(SignIn_Email.get())} in DB!")
def Window_SignIn_PowerOn():
Window_SignIn.deiconify()
Window_SignIn.geometry('1500x800')
Window_SignIn.resizable(width=False, height=False)
Window_SignIn["bg"] = "#98FB98"
Window_SignIn.title("Online Shop - Sign In")
SignIn_Lbl1 = Label(Window_SignIn, text="SIGN IN", font=("Arial Bold", 50), bg="#98FB98")
SignIn_Lbl1.place(relx=0.5, rely=0.15, anchor="c")
SignIn_Lbl2 = Label(Window_SignIn, text="Enter Email:", font=("Arial Bold", 36), bg="#98FB98")
SignIn_Lbl2.place(relx=0.2, rely=0.3, anchor="c")
SignIn_Lbl3 = Label(Window_SignIn, text="Enter Password:", font=("Arial Bold", 36), bg="#98FB98")
SignIn_Lbl3.place(relx=0.17, rely=0.4, anchor="c")
SignIn_EmailTxt = Entry(Window_SignIn, width=20, bd=5, font=("Arial Bold", 36), textvariable=SignIn_Email)
SignIn_EmailTxt.place(relx=0.5, rely=0.3, anchor="c")
SignIn_PasswordTxt = Entry(Window_SignIn, width=20, bd=5, font=("Arial Bold", 36), show='*',
textvariable=SignIn_Password)
SignIn_PasswordTxt.place(relx=0.5, rely=0.4, anchor="c")
SignIn_ButtonToSignUp = Button(Window_SignIn, text="Don't have account yet? Sign Up!", font=("Arial Bold", 8), bd=5,
background="#3CB371", command=SignIn_ButtonToSignUp_Clicked)
SignIn_ButtonToSignUp.place(relx=0.5, rely=0.5, anchor="c")
SignIn_ButtonConfirm = Button(Window_SignIn, text="CONFIRM", font=("Arial Bold", 24), bd=10, background="#3CB371",
command=SignIn_ButtonConfirm_Clicked)
SignIn_ButtonConfirm.place(relx=0.5, rely=0.7, anchor="c")
Window_SignIn.mainloop()
def SignUp_ButtonToSignIn_Clicked():
Window_SignUp.withdraw()
Window_SignIn_PowerOn()
def SignUp_ButtonConfirm_Clicked():
if '@' in str(SignUp_Email.get()):
if 7 < len((str(SignUp_Password.get()))) < 17:
if str(SignUp_Password.get()) == str(SignUp_PasswordConfirm.get()):
cursor.execute(f"""INSERT INTO ACCOUNTS VALUES ('{str(SignUp_Email.get())}', '{str(SignUp_Password.get())}')""")
conn.commit()
messagebox.showinfo("Online Shop - Sign Up", "Account created successfully and added to DB!")
Window_SignUp.withdraw()
Window_SignIn_PowerOn()
else:
messagebox.showinfo("Online Shop - Sign Up", f"Passwords do not match!")
else:
messagebox.showinfo("Online Shop - Sign Up", f"Password must be from 8 to 16 characters!")
else:
messagebox.showinfo("Online Shop - Sign Up", f"Entered email {str(SignUp_Email.get())} is not valid!")
def Window_SignUp_PowerOn():
Window_SignUp.deiconify()
Window_SignUp.geometry('1500x800')
Window_SignUp.resizable(width=False, height=False)
Window_SignUp["bg"] = "#DDA0DD"
Window_SignUp.title("Online Shop - Sign Up")
SignUp_Lbl1 = Label(Window_SignUp, text="SIGN UP", font=("Arial Bold", 50), bg="#DDA0DD")
SignUp_Lbl1.place(relx=0.5, rely=0.1, anchor="c")
SignUp_Lbl2 = Label(Window_SignUp, text="Enter Email:", font=("Arial Bold", 36), bg="#DDA0DD")
SignUp_Lbl2.place(relx=0.2, rely=0.25, anchor="c")
SignUp_Lbl3 = Label(Window_SignUp, text="Enter Password:", font=("Arial Bold", 36), bg="#DDA0DD")
SignUp_Lbl3.place(relx=0.17, rely=0.35, anchor="c")
SignUp_Lbl4 = Label(Window_SignUp, text="Confirm Password:", font=("Arial Bold", 36), bg="#DDA0DD")
SignUp_Lbl4.place(relx=0.15, rely=0.45, anchor="c")
SignUp_EmailTxt = Entry(Window_SignUp, width=20, bd=5, font=("Arial Bold", 36), textvariable=SignUp_Email)
SignUp_EmailTxt.place(relx=0.5, rely=0.25, anchor="c")
SignUp_PasswordTxt = Entry(Window_SignUp, width=20, bd=5, font=("Arial Bold", 36), show='*',
textvariable=SignUp_Password)
SignUp_PasswordTxt.place(relx=0.5, rely=0.35, anchor="c")
SignUp_PasswordConfirmTxt = Entry(Window_SignUp, width=20, bd=5, font=("Arial Bold", 36), show='*',
textvariable=SignUp_PasswordConfirm)
SignUp_PasswordConfirmTxt.place(relx=0.5, rely=0.45, anchor="c")
SignUp_ButtonToSignIn = Button(Window_SignUp, text="Already have account? Sign In!", font=("Arial Bold", 8), bd=5,
background="#DA70D6", command=SignUp_ButtonToSignIn_Clicked)
SignUp_ButtonToSignIn.place(relx=0.5, rely=0.55, anchor="c")
SignUp_ButtonConfirm = Button(Window_SignUp, text="CONFIRM", font=("Arial Bold", 24), bd=10, background="#DA70D6",
command=SignUp_ButtonConfirm_Clicked)
SignUp_ButtonConfirm.place(relx=0.5, rely=0.7, anchor="c")
Window_SignUp.mainloop()
def OnlineShop_ButtonSignOut_Clicked():
Window_OnlineShop.withdraw()
Window_SignIn_PowerOn()
def OnlineShop_ButtonAccountInfo_Clicked():
Window_OnlineShop.withdraw()
Window_AccountInfo_PowerOn()
def OnlineShop_ButtonConfirmOrder_Clicked():
Window_OnlineShop.withdraw()
Window_PreParingOrder_PowerOn()
def OnlineShop_ButtonPreviousGood_Clicked():
Window_OnlineShop.withdraw()
global CurrentID
CurrentID -= 1
Window_OnlineShop_PowerOn()
def OnlineShop_ButtonNextGood_Clicked():
Window_OnlineShop.withdraw()
global CurrentID
CurrentID += 1
Window_OnlineShop_PowerOn()
def OnlineShop_ButtonTakeToBasket_Clicked():
global CurrentID
global CurrentName
global CurrentPrice
global CurrentGood
global CurrentOrder
CurrentGood[0] = CurrentID
CurrentGood[1] = CurrentName
CurrentGood[2] = CurrentPrice
CurrentGood[3] = int(HowManyWasTaken.get())
CurrentOrder.append(CurrentGood.copy())
Window_OnlineShop.withdraw()
Window_OnlineShop_PowerOn()
def Window_OnlineShop_PowerOn():
Window_OnlineShop.deiconify()
Window_OnlineShop.geometry('1500x800')
Window_OnlineShop.resizable(width=False, height=False)
Window_OnlineShop["bg"] = "#F0E68C"
Window_OnlineShop.title("Online Shop - Catalog")
cursor.execute("""SELECT COUNT(*) as count FROM GOODS""")
GoodsWholeAmount = cursor.fetchall()[0][0]
PreviousGoodButtonState = "normal"
NextGoodButtonState = "normal"
if CurrentID == 1:
PreviousGoodButtonState = "disabled"
if CurrentID == GoodsWholeAmount:
NextGoodButtonState = "disabled"
cursor.execute(f"""SELECT * FROM GOODS WHERE ID LIKE {CurrentID}""")
GoodInfo = cursor.fetchall()[0]
global CurrentName
CurrentName = GoodInfo[1]
global CurrentPrice
CurrentPrice = GoodInfo[2]
global CurrentAmount
CurrentAmount = GoodInfo[3]
TextForBasket = []
global CurrentOrder
k = 0
for GoodSort in CurrentOrder:
k += 1
TextForBasket.append(str(k) + ") " + GoodSort[1] + " ( " + "ID = " + str(GoodSort[0]) + " ), " +
str(GoodSort[3]) + " items, whole price - " + str(GoodSort[2]*GoodSort[3]) + "$\n")
OnlineShop_Lbl1 = Label(Window_OnlineShop, text="GOOD's SHOP", font=("Arial Bold", 50), bg="#F0E68C")
OnlineShop_Lbl1.place(relx=0.2, rely=0.08, anchor="c")
OnlineShop_ButtonAccountInfo = Button(Window_OnlineShop, text="ACCOUNT INFORMATION", font=("Arial Bold", 24), bd=10,
background="#DAA520", command=OnlineShop_ButtonAccountInfo_Clicked)
OnlineShop_ButtonAccountInfo.place(relx=0.65, rely=0.08, anchor="c")
OnlineShop_ButtonSignOut = Button(Window_OnlineShop, text="SIGN OUT", font=("Arial Bold", 24), bd=10,
background="#DAA520", command=OnlineShop_ButtonSignOut_Clicked)
OnlineShop_ButtonSignOut.place(relx=0.9, rely=0.08, anchor="c")
OnlineShop_Lb_ID = Label(Window_OnlineShop, text="Good's ID", font=("Arial Bold", 24), bg="#FFA500", padx=50,
pady=20, relief="solid")
OnlineShop_Lb_ID.place(relx=0.1, rely=0.35, anchor="c")
OnlineShop_Lb_ID_Value = Label(Window_OnlineShop, text=str(CurrentID), font=("Arial Bold", 12), bg="#F0E68C",
padx=50, pady=20)
OnlineShop_Lb_ID_Value.place(relx=0.1, rely=0.443, anchor="c")
OnlineShop_Lb_Name = Label(Window_OnlineShop, text="Good's Name", font=("Arial Bold", 24), bg="#FFA500", padx=50,
pady=20, relief="solid")
OnlineShop_Lb_Name.place(relx=0.28, rely=0.35, anchor="c")
OnlineShop_Lb_Name_Value = Label(Window_OnlineShop, text=str(CurrentName), font=("Arial Bold", 12), bg="#F0E68C",
padx=50, pady=20)
OnlineShop_Lb_Name_Value.place(relx=0.28, rely=0.443, anchor="c")
OnlineShop_Lb_Price = Label(Window_OnlineShop, text="Price", font=("Arial Bold", 24), bg="#FFA500", padx=50,
pady=20, relief="solid")
OnlineShop_Lb_Price.place(relx=0.438, rely=0.35, anchor="c")
OnlineShop_Lb_Price_Value = Label(Window_OnlineShop, text=str(CurrentPrice)+"$", font=("Arial Bold", 12),
bg="#F0E68C", padx=50, pady=20)
OnlineShop_Lb_Price_Value.place(relx=0.44, rely=0.443, anchor="c")
OnlineShop_Lb_Amount = Label(Window_OnlineShop, text="Amount in stock", font=("Arial Bold", 24), bg="#FFA500",
padx=50, pady=20, relief="solid")
OnlineShop_Lb_Amount.place(relx=0.607, rely=0.35, anchor="c")
OnlineShop_Lb_Amount_Value = Label(Window_OnlineShop, text=str(CurrentAmount), font=("Arial Bold", 12),
bg="#F0E68C", padx=50, pady=20)
OnlineShop_Lb_Amount_Value.place(relx=0.61, rely=0.443, anchor="c")
OnlineShop_Lb_Take = Label(Window_OnlineShop, text="Take some?", font=("Arial Bold", 24), bg="#00BFFF",
padx=50, pady=20, relief="solid")
OnlineShop_Lb_Take.place(relx=0.85, rely=0.35, anchor="c")
OnlineShop_SpinBox = Spinbox(Window_OnlineShop, from_=1, to=CurrentAmount, width=5, bg="#00FFFF", bd=10,
textvariable=HowManyWasTaken)
OnlineShop_SpinBox.place(relx=0.78, rely=0.45, anchor="c")
OnlineShop_ButtonTakeToBasket = Button(Window_OnlineShop, text="TAKE TO BASKET", font=("Arial Bold", 12), bd=10,
background="#00FFFF", command=OnlineShop_ButtonTakeToBasket_Clicked)
OnlineShop_ButtonTakeToBasket.place(relx=0.88, rely=0.45, anchor="c")
OnlineShop_ButtonConfirmOrder = Button(Window_OnlineShop, text="CONFIRM AND GO TO THE ORDER PREPARING",
font=("Arial Bold", 24), bd=10, background="#DAA520",
command=OnlineShop_ButtonConfirmOrder_Clicked)
OnlineShop_ButtonConfirmOrder.place(relx=0.3, rely=0.9, anchor="c")
OnlineShop_Lbl2 = Label(Window_OnlineShop, text="PREVIOUS GOOD", font=("Arial Bold", 36), bg="#F0E68C")
OnlineShop_Lbl2.place(relx=0.15, rely=0.58, anchor="c")
OnlineShop_Lbl3 = Label(Window_OnlineShop, text="NEXT GOOD", font=("Arial Bold", 36), bg="#F0E68C")
OnlineShop_Lbl3.place(relx=0.45, rely=0.58, anchor="c")
OnlineShop_ButtonPreviousGood = Button(Window_OnlineShop, text="<--", font=("Arial Bold", 40), bd=10,
background="#BDB76B", state=PreviousGoodButtonState,
command=OnlineShop_ButtonPreviousGood_Clicked)
OnlineShop_ButtonPreviousGood.place(relx=0.15, rely=0.7, anchor="c")
OnlineShop_ButtonNextGood = Button(Window_OnlineShop, text="-->", font=("Arial Bold", 40), bd=10,
background="#BDB76B", state=NextGoodButtonState,
command=OnlineShop_ButtonNextGood_Clicked)
OnlineShop_ButtonNextGood.place(relx=0.45, rely=0.7, anchor="c")
OnlineShop_Lbl_Basket = Label(Window_OnlineShop, text="BASKET:", font=("Arial Bold", 36), bg="#F0E68C")
OnlineShop_Lbl_Basket.place(relx=0.8, rely=0.55, anchor="c")
OnlineShop_BasketInfoTxt = Text(Window_OnlineShop, height=7, width=35, bd=5, font=("Times New Roman", 24))
OnlineShop_BasketInfoTxt.place(relx=0.79, rely=0.77, anchor="c")
for i in range(len(TextForBasket)):
OnlineShop_BasketInfoTxt.insert(INSERT, TextForBasket[i])
Window_OnlineShop.mainloop()
def AccountInfo_ButtonGoShopping_Clicked():
Window_AccountInfo.withdraw()
Window_OnlineShop_PowerOn()
def AccountInfo_ButtonSignOut_Clicked():
Window_AccountInfo.withdraw()
Window_SignIn_PowerOn()
def AccountInfo_ButtonConfirmNewEmail_Clicked():
if '@' in str(AccountInfo_NewEmail.get()):
sql = f"""UPDATE ACCOUNTS SET email = '{str(AccountInfo_NewEmail.get())}' WHERE email = '{str(SignIn_Email.get())}'"""
cursor.execute(sql)
conn.commit()
messagebox.showinfo("Online Shop - Account Information",
"Email changed successfully! You need to sign in again.")
Window_AccountInfo.withdraw()
Window_SignIn_PowerOn()
else:
messagebox.showinfo("Online Shop - Sign Up", f"Entered email {str(AccountInfo_NewEmail.get())} is not valid!")
def AccountInfo_ButtonConfirmNewPassword_Clicked():
if 7 < len((str(AccountInfo_NewPassword.get()))) < 17:
sql = f"""UPDATE ACCOUNTS SET password = '{str(AccountInfo_NewPassword.get())}' WHERE email = '{str(SignIn_Email.get())}'"""
cursor.execute(sql)
conn.commit()
messagebox.showinfo("Online Shop - Account Information",
"Password changed successfully! You need to sigh in again")
Window_AccountInfo.withdraw()
Window_SignIn_PowerOn()
else:
messagebox.showinfo("Online Shop - Sign Up", f"Password must be from 8 to 16 characters!")
def Chiphered_Password(size):
return '*' * size
def Window_AccountInfo_PowerOn():
Window_AccountInfo.deiconify()
Window_AccountInfo.geometry('1500x800')
Window_AccountInfo.resizable(width=False, height=False)
Window_AccountInfo["bg"] = "#F08080"
Window_AccountInfo.title("Online Shop - Account Information")
AccountInfo_Lbl1 = Label(Window_AccountInfo, text="ACCOUNT INFORMATION", font=("Arial Bold", 50), bg="#F08080")
AccountInfo_Lbl1.place(relx=0.3, rely=0.08, anchor="c")
AccountInfo_ButtonGoShopping = Button(Window_AccountInfo, text="GO SHOPPING", font=("Arial Bold", 24), bd=10,
background="#DC143C", command=AccountInfo_ButtonGoShopping_Clicked)
AccountInfo_ButtonGoShopping.place(relx=0.7, rely=0.08, anchor="c")
AccountInfo_ButtonSignOut = Button(Window_AccountInfo, text="SIGN OUT", font=("Arial Bold", 24), bd=10,
background="#DC143C", command=AccountInfo_ButtonSignOut_Clicked)
AccountInfo_ButtonSignOut.place(relx=0.9, rely=0.08, anchor="c")
AccountInfo_Lbl2 = Label(Window_AccountInfo, text="Current EMAIL", font=("Arial Bold", 50), bg="#F08080")
AccountInfo_Lbl2.place(relx=0.25, rely=0.3, anchor="c")
AccountInfo_Lbl3 = Label(Window_AccountInfo, text="Current PASSWORD", font=("Arial Bold", 50), bg="#F08080")
AccountInfo_Lbl3.place(relx=0.7, rely=0.3, anchor="c")
AccountInfo_Lbl4 = Label(Window_AccountInfo, text=str(SignIn_Email.get()), font=("Arial Bold", 36), bg="#F08080")
AccountInfo_Lbl4.place(relx=0.25, rely=0.4, anchor="c")
AccountInfo_Lbl5 = Label(Window_AccountInfo, text=Chiphered_Password(len(str(SignIn_Password.get()))),
font=("Arial Bold", 36), bg="#F08080")
AccountInfo_Lbl5.place(relx=0.7, rely=0.4, anchor="c")
AccountInfo_Lbl6 = Label(Window_AccountInfo, text="Change EMAIL?", font=("Arial Bold", 50), bg="#F08080")
AccountInfo_Lbl6.place(relx=0.25, rely=0.6, anchor="c")
AccountInfo_Lbl7 = Label(Window_AccountInfo, text="Change PASSWORD?", font=("Arial Bold", 50), bg="#F08080")
AccountInfo_Lbl7.place(relx=0.7, rely=0.6, anchor="c")
AccountInfo_NewEmailTxt = Entry(Window_AccountInfo, width=20, bd=5, font=("Arial Bold", 36),
textvariable=AccountInfo_NewEmail)
AccountInfo_NewEmailTxt.place(relx=0.25, rely=0.7, anchor="c")
AccountInfo_NewPasswordTxt = Entry(Window_AccountInfo, width=20, bd=5, font=("Arial Bold", 36), show='*',
textvariable=AccountInfo_NewPassword)
AccountInfo_NewPasswordTxt.place(relx=0.7, rely=0.7, anchor="c")
AccountInfo_ButtonConfirmNewEmail = Button(Window_AccountInfo, text="CONFIRM", font=("Arial Bold", 24), bd=10,
background="#DC143C", command=AccountInfo_ButtonConfirmNewEmail_Clicked)
AccountInfo_ButtonConfirmNewEmail.place(relx=0.25, rely=0.85, anchor="c")
AccountInfo_ButtonConfirmNewPassword = Button(Window_AccountInfo, text="CONFIRM", font=("Arial Bold", 24), bd=10,
background="#DC143C",
command=AccountInfo_ButtonConfirmNewPassword_Clicked)
AccountInfo_ButtonConfirmNewPassword.place(relx=0.7, rely=0.85, anchor="c")
Window_AccountInfo.mainloop()
def PreparingOrder_ButtonGoShopping_Clicked():
Window_PreparingOrder.withdraw()
Window_OnlineShop_PowerOn()
def PreparingOrder_ButtonSignOut_Clicked():
Window_PreparingOrder.withdraw()
Window_SignIn_PowerOn()
def PreparingOrder_ButtonConfirmOrder_Clicked():
if '+' in str(MobilePhone.get()) and len(str(MobilePhone.get())) == 13:
messagebox.showinfo("Online Shop - Preparing Order", "Order successfully prepared! We will contact you soon!")
global CurrentOrder
CurrentOrder = []
global CurrentID
CurrentID = 1
Window_PreparingOrder.withdraw()
Window_OnlineShop_PowerOn()
else:
messagebox.showinfo("Online Shop - Preparing Order", "Invalid mobile phone entered!")
def Window_PreParingOrder_PowerOn():
Window_PreparingOrder.deiconify()
Window_PreparingOrder.geometry('1500x800')
Window_PreparingOrder.resizable(width=False, height=False)
Window_PreparingOrder["bg"] = "#87CEFA"
Window_PreparingOrder.title("Online Shop - Preparing Order")
PreparingOrder_Lbl1 = Label(Window_PreparingOrder, text="PREPARING ORDER", font=("Arial Bold", 50), bg="#87CEFA")
PreparingOrder_Lbl1.place(relx=0.25, rely=0.08, anchor="c")
PreparingOrder_ButtonGoShopping = Button(Window_PreparingOrder, text="GO SHOPPING", font=("Arial Bold", 24), bd=10,
background="#1E90FF", command=PreparingOrder_ButtonGoShopping_Clicked)
PreparingOrder_ButtonGoShopping.place(relx=0.7, rely=0.08, anchor="c")
PreparingOrder_ButtonSignOut = Button(Window_PreparingOrder, text="SIGN OUT", font=("Arial Bold", 24), bd=10,
background="#1E90FF", command=PreparingOrder_ButtonSignOut_Clicked)
PreparingOrder_ButtonSignOut.place(relx=0.9, rely=0.08, anchor="c")
PreparingOrder_Lbl2 = Label(Window_PreparingOrder, text="YOUR ORDER:", font=("Arial Bold", 50), bg="#87CEFA")
PreparingOrder_Lbl2.place(relx=0.17, rely=0.25, anchor="c")
PreparingOrder_OrderInfoTxt = Text(Window_PreparingOrder, height=7, width=59, bd=5, font=("Times New Roman", 24))
PreparingOrder_OrderInfoTxt.place(relx=0.66, rely=0.35, anchor="c")
TextForBasket = []
global CurrentOrder
k = 0
WholePrice = 0.0
for GoodSort in CurrentOrder:
k += 1
WholePrice += GoodSort[2] * GoodSort[3]
TextForBasket.append(str(k) + ") " + GoodSort[1] + " ( " + "ID = " + str(GoodSort[0]) + " ), " +
str(GoodSort[3]) + " items, whole price - " + str(GoodSort[2] * GoodSort[3]) + "$\n")
TextForBasket.append("WHOLE SUM: " + str(WholePrice) + "$")
for i in range(len(TextForBasket)):
PreparingOrder_OrderInfoTxt.insert(INSERT, TextForBasket[i])
PreparingOrder_Lbl3 = Label(Window_PreparingOrder, text="Indicate your contacts and we will accept your order:",
font=("Arial Bold", 36), bg="#87CEFA")
PreparingOrder_Lbl3.place(relx=0.4, rely=0.6, anchor="c")
PreparingOrder_Lbl4 = Label(Window_PreparingOrder, text="Mobile Phone:", font=("Arial Bold", 36), bg="#87CEFA")
PreparingOrder_Lbl4.place(relx=0.2, rely=0.7, anchor="c")
PreparingOrder_MobilePhoneTxt = Entry(Window_PreparingOrder, width=13, bd=5, font=("Arial Bold", 36),
textvariable=MobilePhone)
PreparingOrder_MobilePhoneTxt.place(relx=0.438, rely=0.7, anchor="c")
PreparingOrder_Lbl5 = Label(Window_PreparingOrder, text="Address:", font=("Arial Bold", 36), bg="#87CEFA")
PreparingOrder_Lbl5.place(relx=0.238, rely=0.8, anchor="c")
PreparingOrder_AddressTxt = Text(Window_PreparingOrder, height=3, width=60, bd=5, font=("Times New Roman", 24))
PreparingOrder_AddressTxt.place(relx=0.64, rely=0.85, anchor="c")
PreparingOrder_ButtonConfirmOrder = Button(Window_PreparingOrder, text="CONFIRM ORDER", font=("Arial Bold", 24),
bd=10, background="#1E90FF",
command=PreparingOrder_ButtonConfirmOrder_Clicked)
PreparingOrder_ButtonConfirmOrder.place(relx=0.15, rely=0.92, anchor="c")
Window_PreparingOrder.mainloop()
# --- GUI BLOCK (end) ---
Window_SignIn_PowerOn() # Launch from the start (sign-in) page
|
[
"shist.pupust@mail.ru"
] |
shist.pupust@mail.ru
|
e358e020b0dd01cb1f401a59c6dc293c3929cbad
|
6ea69f9a4431837a36b04ab926ac9c565b8a5eb6
|
/pydemic_ui/i18n.py
|
53f5ca158c862fef6e82e4716ab8fbbb4df02fcd
|
[
"MIT"
] |
permissive
|
WillAllmeida/pydemic-ui
|
e9fad4845c428f3e2f0e7a65913391c3216083b3
|
f7c05d97489918736b0c7b1da4b0992bd77ed9a1
|
refs/heads/master
| 2023-01-24T13:49:27.219863
| 2020-10-07T18:11:02
| 2020-10-07T18:11:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
from gettext import gettext
from pathlib import Path
import sidekick as sk
LOCALEDIR = Path(__file__).parent / "locale"
def set_i18n(lang, language=None):
"""
Set locale and translations.
Examples:
set_i18n('pt_BR.UTF-8') -> set locale to pt_BR.UTF-8 and language to pt_BR.
"""
import gettext
import locale
import warnings
import os
try:
locale.setlocale(locale.LC_ALL, lang)
locale.setlocale(locale.LC_MESSAGES, language or lang)
os.environ["LANG"] = lang
os.environ["LANGUAGE"] = language or lang.split(".")[0]
except locale.Error:
warnings.warn(f"locale is not supported: {lang}")
gettext.bindtextdomain("messages", localedir=LOCALEDIR)
def run():
import os
lang = os.environ.get("PYDEMIC_LANG") or os.environ.get("LANG")
set_i18n(lang)
def gettext_lazy(st):
return sk.deferred(gettext, st)
_ = gettext
__ = gettext_lazy
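# Minimal usage sketch (hypothetical caller, assuming the package layout matches this
# file's path pydemic_ui/i18n.py; not part of the module itself):
# from pydemic_ui import i18n
# i18n.run() # picks the language from PYDEMIC_LANG or LANG
# print(i18n._("Hello")) # eager translation via gettext
# greeting = i18n.__("Hello") # lazy translation, resolved on first use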
|
[
"fabiomacedomendes@gmail.com"
] |
fabiomacedomendes@gmail.com
|
a344bfbfba2175a962b94b0450d79418dd1cd225
|
8246e9fbdecdb37651e0d09497fd9428e434f33c
|
/ServiceCatagory/urls.py
|
e17e6184bb33e3ef9d66c52d6ec20aae7d197828
|
[] |
no_license
|
rajeev1234/Landing-Page
|
479995026ab01fc504a1e9502e7763dc04266009
|
4bfd22a6a1776907ba78b3dc9037064c820b049e
|
refs/heads/master
| 2020-03-08T13:37:20.253252
| 2018-04-05T06:33:26
| 2018-04-05T06:33:26
| 128,162,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
from django.urls import path
from . import views
urlpatterns = [
# Path to list view of ServiceCatagory : ServiceCatagory_list
path('', views.ServiceCatagoryListView.as_view(), name='ServiceCatagory_list'),
# Path to create new ServiceCatagory : ServiceCatagory_new
path('new/', views.ServiceCatagoryCreateView.as_view(), name='ServiceCatagory_new'),
# Path to edit ServiceCatagory : edit_list
path('<int:pk>/edit', views.ServiceCatagoryUpdateView.as_view(), name='ServiceCatagory_update'),
# Path to delete ServiceCatagory : ServiceCatagory_delete
path('<int:pk>/delete', views.ServiceCatagoryDeleteView.as_view(), name='ServiceCatagory_delete'),
# Path to detail view of ServiceCatagory : ServiceCatagory_details
path('<int:pk>', views.ServiceCatagoryDetailView.as_view(), name='ServiceCatagory_details')
]
|
[
"ccrcian.rajeev1@gmail.com"
] |
ccrcian.rajeev1@gmail.com
|
0e535165547e2b25ce06072821dc32d3a608475f
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/pa3/benchmarks/sieve-244.py
|
29dca34d6015fcd1da12c7a36d7a12abcdf185e5
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,581
|
py
|
# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items: # the $ID template placeholder resolved to the method's parameter
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
# Data
v:Vector = None
i:int = 0
# Crunch
v = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
47ab96d26333bbbb7fe7e511845362084c6c14f8
|
6c838b17ff4e68a958045bcaab17371250748100
|
/python/looping/assignment/no2).py
|
b250ed9ef617c2218f40abb71273cc256b779b70
|
[] |
no_license
|
suchishree/django_assignment1
|
b0938c5de2d0b508f711e569110a12d229b6c0f5
|
1e6b83dce9505c35d7a1b31414c81f2596a00943
|
refs/heads/main
| 2023-07-15T05:10:15.842262
| 2021-08-13T14:25:39
| 2021-08-13T14:25:39
| 376,197,760
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
for i in range(10):
print(i, end=" ")
|
[
"suchishree99@gmail.com"
] |
suchishree99@gmail.com
|
aab3d5f4fcf039292e9e675febb1d2c545bee017
|
d9e08fdaf07a242a6fb071ff47bb8aaadba86c72
|
/2016Vertexing/GenerateEfficiencyTable_KillTracks.py
|
4c63bf0afa83f0518d2e6deb06ccfc74c7c2853a
|
[] |
no_license
|
mrsolt/HPS
|
8d6577b54979b463c2ca8cc6d66db8b704df4ed4
|
2678da7801daf0eb314d11f837e235138ebe8340
|
refs/heads/master
| 2021-01-19T03:43:50.264869
| 2020-09-24T18:35:19
| 2020-09-24T18:35:19
| 46,081,219
| 1
| 2
| null | 2020-08-05T21:54:12
| 2015-11-12T21:13:52
|
Python
|
UTF-8
|
Python
| false
| false
| 49,043
|
py
|
#!/usr/bin/env python
#Author Matt Solt mrsolt@slac.stanford.edu
import sys
import array, math
import numpy as np
import random
import ROOT
from ROOT import gROOT, gStyle, TFile, TTree, TChain, TMVA, TCut, TCanvas, gDirectory, TH1, TGraph, gPad, TF1, TH1F, TLegend, TLatex, TGraphErrors
import getopt
def print_usage():
print "\nUsage: {0} <output file basename> <L1L1 input files list> <L1L2 input files list> <L2L2 input files list> <input recon truth tuple files list>".format(sys.argv[0])
print "Arguments: "
print '\t-e: use this beam energy <default 2.3 GeV>'
print '\t-t: use this target position <default -4.3 mm>'
print '\t-n: number of bins in histograms <default 50>'
print '\t-z: total range in z covered <default 100 mm>'
print '\t-T: plot Test plots'
print '\t-N: number of bins from target to normalize to <default is 4>'
print '\t-s: tuple name <default is "ntuple">'
print '\t-h: this help message'
print
#Default Values
eBeam = 2.3
makeTestPlots = False
killInTrackSlope = True
targZ = -4.3
nBins = 50
zRange = 100
nNorm = 4
tupleName = "ntuple"
fittype = 5
#Function to plot efficiency tests of known masses
def plotTest(iMass,inputFile,output,targZ,maxZ,canvas):
inputfile = open(inputFile,"r")
mass = []
z = []
result = []
eff = []
#Readlines from input file
lines = inputfile.readlines()
for x in lines:
result.append(x.split())
inputfile.close()
nMass = len(result[0])
nBins = len(result[1])
#Grab Array of Masses
for i in range(nMass):
mass.append(float(result[0][i]))
#Grab Array of z's
for i in range(nBins):
z.append(float(result[1][i]))
#Convert the strings from input file into floats
for i in range(nMass):
dummy = []
for j in range(nBins):
dummy.append(float(result[i+2][j]))
eff.append(dummy)
del dummy
#define histograms
histo1 = TH1F("histo1","histo1",nBins-1,targZ,maxZ) #test histogram
histo2 = TH1F("histo2","histo2",nBins,targZ,maxZ) #known histogram
#Use the mass greater than and less than the mass of interest
iMass1 = iMass - 1
iMass2 = iMass + 1
for i in range(nBins-1):
iZ1 = i
iZ2 = i + 1
Q11 = eff[iMass1][iZ1]
Q12 = eff[iMass2][iZ1]
Q21 = eff[iMass1][iZ2]
Q22 = eff[iMass2][iZ2]
#Interpolate value
interpolate = Bilinear(z[i],mass[iMass],z[iZ1],z[iZ2],mass[iMass1],mass[iMass2],Q11,Q12,Q21,Q22)
histo1.SetBinContent(i+1,interpolate)
for i in range(nBins):
histo2.SetBinContent(i+1,eff[iMass][i])
#Draw Histograms
legend = TLegend(.68,.66,.92,.87)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetTextFont(42)
legend.SetTextSize(0.035)
legend.AddEntry(histo1,"Interpolation","LP")
legend.AddEntry(histo2,"MC","LP")
maximum = histo1.GetMaximum()
if(histo2.GetMaximum() > maximum): maximum = histo2.GetMaximum()
histo1.Draw("")
histo1.GetXaxis().SetTitle("z [mm]")
histo1.SetTitle("A' Efficiency {0:0.3f} GeV".format(mass[iMass]))
histo1.GetYaxis().SetRangeUser(0,maximum*1.2)
histo2.Draw("same")
histo2.SetLineColor(2)
legend.Draw("")
gStyle.SetOptStat(0)
canvas.Print(output+".pdf")
#Function to interpolate the efficiency for a given mass and z
def Interpolate(Mass,Z,mass,z,eff):
iMass = 0
iZ = 0
#Grab the index of mass and z
for i in range(nMass):
if(Mass < mass[i]):
iMass = i
break
for i in range(nBins):
if(Z < z[i]):
iZ = i
break
#Check to make sure mass and z are not out of range
if(iMass == 0):
print "Mass is out of range!"
return
if(iZ == 0):
print "Z is behind target!"
return
iMass1 = iMass - 1
iMass2 = iMass
iZ1 = iZ - 1
iZ2 = iZ
Q11 = eff[iMass1][iZ1]
Q12 = eff[iMass2][iZ1]
Q21 = eff[iMass1][iZ2]
Q22 = eff[iMass2][iZ2]
#Interpolate value
interpolate = Bilinear(Z,Mass,z[iZ1],z[iZ2],mass[iMass1],mass[iMass2],Q11,Q12,Q21,Q22)
return interpolate
#Function to interpolate the efficiency for a given mass and z directly from file
def InterpolateFromFile(Mass,Z,inputFile):
mass = getMassArray(inputFile)
z = getZArray(inputFile)
eff = getEfficiency(inputFile)
interpolate = Interpolate(Mass,Z,mass,z,eff)
return interpolate
def getMassArray(inputFile):
inputfile = open(inputFile,"r")
mass = []
result = []
#Readlines from input file
lines = inputfile.readlines()
for x in lines:
result.append(x.split())
inputfile.close()
nMass = len(result[0])
#Grab Array of Masses
for i in range(nMass):
mass.append(float(result[0][i]))
return mass
def getZArray(inputFile):
inputfile = open(inputFile,"r")
z = []
result = []
#Readlines from input file
lines = inputfile.readlines()
for x in lines:
result.append(x.split())
inputfile.close()
nBins = len(result[1])
#Grab Array of z's
for i in range(nBins):
z.append(float(result[1][i]))
return z
def getEfficiency(inputFile):
inputfile = open(inputFile,"r")
result = []
eff = []
#Readlines from input file
lines = inputfile.readlines()
for x in lines:
result.append(x.split())
inputfile.close()
nMass = len(result[0])
nBins = len(result[1])
#Convert the strings from input file into floats
for i in range(nMass):
dummy = []
for j in range(nBins):
dummy.append(float(result[i+2][j]))
eff.append(dummy)
del dummy
return eff
#Function for Bilinear interpolation
def Bilinear(x,y,x1,x2,y1,y2,Q11,Q12,Q21,Q22):
denom = (x2-x1)*(y2-y1)
t1 = (x2-x)*(y2-y)/denom*Q11
t2 = (x-x1)*(y2-y)/denom*Q21
t3 = (x2-x)*(y-y1)/denom*Q12
t4 = (x-x1)*(y-y1)/denom*Q22
return t1+t2+t3+t4
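#Worked example (illustrative numbers only): interpolating at the centre of the
#square x1=0, x2=2, y1=0, y2=2 with corner values Q11=0, Q12=2, Q21=2, Q22=4
#gives Bilinear(1,1,0,2,0,2,0,2,2,4) = 2.0, the average of the four corners.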
def plotEff(inputFile,output,nBins,targZ,maxZ,canvas):
inputfile = open(inputFile,"r")
mass = getMassArray(inputFile)
z = getZArray(inputFile)
eff = getEfficiency(inputFile)
histos = []
for i in range(len(mass)):
histos.append(TH1F("histo{0}".format(mass[i]),"histo{0}".format(mass[i]),nBins,targZ,maxZ))
legend = TLegend(.68,.50,.92,.97)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetTextFont(42)
legend.SetTextSize(0.035)
maximum = 0
for i in range(len(mass)):
for j in range(len(z)):
histos[i].SetBinContent(j+1,eff[i][j])
if(eff[i][0] > maximum):
maximum = eff[i][0]
#legend.AddEntry(histos[i],str().format('{0:.3f}',mass[i]*1000)+" MeV","LP")
legend.AddEntry(histos[i],str("%.3g" % (mass[i] * 1000))+" MeV","LP")
if(i == 0):
histos[i].Draw()
histos[i].SetStats(0)
histos[i].GetXaxis().SetTitle("z [mm]")
histos[i].GetYaxis().SetTitle("efficiency")
histos[i].SetTitle("A' Acceptance * Efficiency")
else:
histos[i].Draw("same")
histos[i].SetLineColor(i+1)
histos[0].GetYaxis().SetRangeUser(0,0.13)
legend.Draw()
canvas.Print(output+".png")
def plotEff2(histos,histosTruth,normArr,output,outPDF,outfileroot,canvas,mass,useNorm,title=""):
outfileroot.cd()
canvas.Clear()
legend = TLegend(.68,.50,.92,.97)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetTextFont(42)
legend.SetTextSize(0.035)
maximum = 0
histos_copy = []
for i in range(len(mass)):
histos_copy.append(histos[i].Clone())
for i in range(len(mass)):
histos_copy[i].SetLineColor(i+1)
histos_copy[i].Divide(histosTruth[i])
if(useNorm):
histos_copy[i].Scale(1/normArr[i])
if(histos[i].GetMaximum() > maximum):
maximum = histos_copy[i].GetMaximum()
legend.AddEntry(histos_copy[i],str("%.3g" % (mass[i] * 1000))+" MeV","LP")
#histos[i].Sumw2()
if(i == 0):
histos_copy[i].Draw()
histos_copy[i].SetStats(0)
histos_copy[i].GetXaxis().SetTitle("z [mm]")
histos_copy[i].GetYaxis().SetTitle("efficiency")
histos_copy[i].SetTitle("A' Acceptance * Efficiency {0}".format(title))
else:
histos_copy[i].Draw("same")
histos_copy[0].GetYaxis().SetRangeUser(0,1.3*maximum)
legend.Draw()
canvas.Print(output+".png")
canvas.Print(outPDF+".pdf")
canvas.Write()
del histos_copy
def plotAll(histosL1L1,histosL1L2,histosL2L2,histosTruth,normArr,output,outPDF,outfileroot,canvas,mass,title=""):
outfileroot.cd()
maximum = 0
histos_copy_L1L1 = []
histos_copy_L1L2 = []
histos_copy_L2L2 = []
for i in range(len(mass)):
histos_copy_L1L1.append(histosL1L1[i].Clone())
histos_copy_L1L2.append(histosL1L2[i].Clone())
histos_copy_L2L2.append(histosL2L2[i].Clone())
for i in range(len(mass)):
canvas.Clear()
histos_copy_L1L1[i].SetLineColor(1)
histos_copy_L1L2[i].SetLineColor(2)
histos_copy_L2L2[i].SetLineColor(4)
histos_copy_L1L1[i].Divide(histosTruth[i])
histos_copy_L1L2[i].Divide(histosTruth[i])
histos_copy_L2L2[i].Divide(histosTruth[i])
sumhisto = histos_copy_L1L1[i].Clone()
sumhisto.Add(histos_copy_L1L2[i])
sumhisto.Add(histos_copy_L2L2[i])
sumhisto.SetLineColor(28)
maximum = sumhisto.GetMaximum()
legend = TLegend(.68,.70,.92,.90)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetTextFont(42)
legend.SetTextSize(0.035)
legend.AddEntry(histos_copy_L1L1[i],"L1L1","LP")
legend.AddEntry(histos_copy_L1L2[i],"L1L2","LP")
legend.AddEntry(histos_copy_L2L2[i],"L2L2","LP")
legend.AddEntry(sumhisto,"Sum","LP")
histos_copy_L1L1[i].Draw()
histos_copy_L1L1[i].SetStats(0)
histos_copy_L1L1[i].GetXaxis().SetTitle("z [mm]")
histos_copy_L1L1[i].GetYaxis().SetTitle("efficiency")
histos_copy_L1L1[i].SetTitle("A' Acceptance * Efficiency {0:0.0f} MeV A' {1}".format(mass[i]*1000,title))
histos_copy_L1L1[i].GetYaxis().SetRangeUser(0,1.3*maximum)
histos_copy_L1L2[i].Draw("same")
histos_copy_L2L2[i].Draw("same")
sumhisto.Draw("same")
legend.Draw()
canvas.Print(output+".png")
canvas.Print(outPDF+".pdf")
canvas.Write()
canvas.Clear()
histos_copy_L1L1[i].Scale(1/normArr[i])
histos_copy_L1L2[i].Scale(1/normArr[i])
histos_copy_L2L2[i].Scale(1/normArr[i])
sumhistonorm = histos_copy_L1L1[i].Clone()
sumhistonorm.Add(histos_copy_L1L2[i])
sumhistonorm.Add(histos_copy_L2L2[i])
sumhistonorm.SetLineColor(28)
maximum = sumhistonorm.GetMaximum()
histos_copy_L1L1[i].Draw()
histos_copy_L1L1[i].SetStats(0)
histos_copy_L1L1[i].GetXaxis().SetTitle("Truth z [mm]")
histos_copy_L1L1[i].GetYaxis().SetTitle("efficiency")
histos_copy_L1L1[i].SetTitle("Normalized A' Acceptance * Efficiency {0:0.0f} MeV A' {1}".format(mass[i]*1000,title))
histos_copy_L1L1[i].GetYaxis().SetRangeUser(0,1.3*maximum)
histos_copy_L1L2[i].Draw("same")
histos_copy_L2L2[i].Draw("same")
sumhistonorm.Draw("same")
legend.Draw()
canvas.Print(output+".png")
canvas.Print(outPDF+".pdf")
canvas.Write()
del sumhisto
del sumhistonorm
del legend
del histos_copy_L1L1
del histos_copy_L1L2
del histos_copy_L2L2
def plotFit(histoL1L1,histoL1L2,histoL2L2,histoTruth,normArr,outPDF,outfileroot,canvas,mass,targZ,title=""):
outfileroot.cd()
maximum = 0
histo_copy_L1L1 = histoL1L1.Clone()
histo_copy_L1L2 = histoL1L2.Clone()
histo_copy_L2L2 = histoL2L2.Clone()
canvas.Clear()
histo_copy_L1L1.SetLineColor(1)
histo_copy_L1L2.SetLineColor(2)
histo_copy_L2L2.SetLineColor(4)
histo_copy_L1L1.Divide(histoTruth)
histo_copy_L1L2.Divide(histoTruth)
histo_copy_L2L2.Divide(histoTruth)
sumhisto = histo_copy_L1L1.Clone()
sumhisto.Add(histo_copy_L1L2)
sumhisto.Add(histo_copy_L2L2)
sumhisto.SetLineColor(28)
sumhisto.Fit("exppol4","QR")
maximum = sumhisto.GetMaximum()
legend = TLegend(.68,.70,.92,.90)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetTextFont(42)
legend.SetTextSize(0.035)
legend.AddEntry(histo_copy_L1L1,"L1L1","LP")
legend.AddEntry(histo_copy_L1L2,"L1L2","LP")
legend.AddEntry(histo_copy_L2L2,"L2L2","LP")
legend.AddEntry(sumhisto,"Sum","LP")
histo_copy_L1L1.SetStats(0)
histo_copy_L1L1.Draw()
histo_copy_L1L1.GetXaxis().SetTitle("z [mm]")
histo_copy_L1L1.GetYaxis().SetTitle("efficiency")
histo_copy_L1L1.SetTitle("A' Acceptance * Efficiency {0:0.0f} MeV A' {1}".format(mass*1000,title))
histo_copy_L1L1.GetYaxis().SetRangeUser(0,1.3*maximum)
histo_copy_L1L2.Draw("same")
histo_copy_L2L2.Draw("same")
sumhisto.Draw("same")
legend.Draw()
canvas.Print(outPDF+".pdf")
canvas.Write()
del sumhisto
del legend
del histo_copy_L1L1
del histo_copy_L1L2
del histo_copy_L2L2
return exppol4.Eval(targZ)
def getEffTH1(hfile, hname):
print 'Getting Efficiency Graph...converting to TH1'
effGraph=hfile.Get(hname)
effGraph.Print("v")
xmin=effGraph.GetXaxis().GetXmin()
xmax=effGraph.GetXaxis().GetXmax()
xsize=effGraph.GetErrorXhigh(0)*2
nbins=effGraph.GetN()
	nbinsHist=int((xmax-xmin)/xsize)
x=ROOT.Double(0.0)
y=ROOT.Double(0.0)
effHist=ROOT.TH1D(effGraph.GetName(),effGraph.GetTitle(),nbinsHist,xmin,xmax)
for i in range(0,nbins) :
effGraph.GetPoint(i,x,y)
histBin=effHist.FindBin(x)
#print str(x)+' ' +str(y) + ' '+str(i)+ ' '+str(histBin)
effHist.SetBinContent(histBin,y)
return effHist
options, remainder = getopt.gnu_getopt(sys.argv[1:], 'e:t:n:z:TN:s:f:h')
# Parse the command line arguments
for opt, arg in options:
if opt=='-e':
eBeam=float(arg)
if opt=='-t':
targZ=float(arg)
if opt=='-n':
nBins=int(arg)
if opt=='-z':
zRange=float(arg)
if opt=='-T':
makeTestPlots = True
if opt=='-N':
nNorm = int(arg)
if opt=='-s':
tupleName = str(arg)
if opt=='-f':
fittype = int(arg)
if opt=='-h':
print_usage()
sys.exit(0)
if len(remainder)!=5:
print_usage()
sys.exit(0)
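#Example invocation (the script name below is hypothetical; the flags follow the getopt string above):
#  python makeTriEffFromTuples.py -e <eBeam GeV> -t <target z mm> -n <nBins> -z <z range mm> <output prefix> <L1L1 list> <L1L2 list> <L2L2 list> <truth list>
#The five positional arguments are the output prefix and four text files that list, one file per line,
#the L1L1, L1L2, and L2L2 tuples after cuts and the truth tuples.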
if killInTrackSlope:
#effSlopeFileName = '/home/mrsolt/hps/test/EmGamma-L1HitEfficiencyResults-2016.root'
effSlopeFileName = 'EmGamma-L1HitEfficiencyResults-2016.root'
effRatioName = 'p2slopehps_007963.1GamEm_L1HitInefficiency'
effSlopeFile = ROOT.TFile(effSlopeFileName)
effSlopeData = getEffTH1(effSlopeFile,effRatioName)
effSlopeData.Print("v")
total = TH1F("total","total",nBins,-0.08,0.08)
passed = TH1F("passed","passed",nBins,-0.08,0.08)
def RemoveHit(slp):
rndm = random.random()
ibin = effSlopeData.FindBin(slp)
eff = 1 - effSlopeData.GetBinContent(ibin) #the slope "efficiency" is actually an inefficiency
total.Fill(slp)
if rndm > eff:
#if rndm > 0.8:
return True
else:
passed.Fill(slp)
return False
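#The functions below apply the L1 hit killing to the reconstructed tuples and re-categorize the events.
#In NewEventsL1L1, a killed L1 hit on a 5-hit track drops the event, a 6-hit track that loses its L1 hit
#demotes the pair to L1L2 (one side killed) or L2L2 (both sides killed), and untouched events stay L1L1.
#NewEventsL1L2 does the same for L1L2 events, only testing the side that actually has an L1 hit.
#KillHits copies the L2L2 events unchanged (they have no L1 hits to kill) and chains the outputs
#together into the new L1L1, L1L2, and L2L2 samples.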
def NewEventsL1L1(events,mass,outfile):
	eleNTrackHits = array.array('i',[0])
	posNTrackHits = array.array('i',[0])
eleTrkLambda = array.array('d',[0])
posTrkLambda = array.array('d',[0])
file = TFile("dumL1L1_{0:0.0f}_{1}.root".format(mass*1000,outfile),"recreate")
events1 = events.CloneTree(0)
events2 = events.CloneTree(0)
events3 = events.CloneTree(0)
events1.SetName("ntuple_L1L1")
events2.SetName("ntuple_L1L2")
events3.SetName("ntuple_L2L2")
events.Branch("eleNTrackHits",eleNTrackHits,"eleNTrackHits/I")
events.Branch("posNTrackHits",posNTrackHits,"posNTrackHits/I")
events.Branch("eleTrkLambda",eleTrkLambda,"eleTrkLambda/D")
events.Branch("posTrkLambda",posTrkLambda,"posTrkLambda/D")
nevents = events.GetEntries()
for entry in xrange(nevents):
events.GetEntry(entry)
removehitEle = RemoveHit(events.eleTrkLambda)
removehitPos = RemoveHit(events.posTrkLambda)
if((removehitEle and events.eleNTrackHits == 5) or (removehitPos and events.posNTrackHits == 5)):
continue
elif((removehitEle and events.eleNTrackHits == 6) and (removehitPos and events.posNTrackHits == 6)):
events3.Fill()
elif((removehitEle and events.eleNTrackHits == 6) or (removehitPos and events.posNTrackHits == 6)):
events2.Fill()
elif(not removehitEle and not removehitPos):
events1.Fill()
else:
print("You missed a category for L1L1")
events1.AutoSave()
events2.AutoSave()
events3.AutoSave()
del file
return events1, events2, events3
def NewEventsL1L2(events,mass,outfile):
	eleNTrackHits = array.array('i',[0])
	posNTrackHits = array.array('i',[0])
	eleTrkLambda = array.array('d',[0])
	posTrkLambda = array.array('d',[0])
	eleHasL1 = array.array('i',[0])
	posHasL1 = array.array('i',[0])
file = TFile("dumL1L2_{0:0.0f}_{1}.root".format(mass*1000,outfile),"recreate")
events1 = events.CloneTree(0)
events2 = events.CloneTree(0)
events3 = events.CloneTree(0)
events1.SetName("ntuple_L1L1")
events2.SetName("ntuple_L1L2")
events3.SetName("ntuple_L2L2")
events.Branch("eleNTrackHits",eleNTrackHits,"eleNTrackHits/I")
events.Branch("posNTrackHits",posNTrackHits,"posNTrackHits/I")
events.Branch("eleTrkLambda",eleTrkLambda,"eleTrkLambda/D")
events.Branch("posTrkLambda",posTrkLambda,"posTrkLambda/D")
events.Branch("eleHasL1",eleHasL1,"eleHasL1/I")
events.Branch("posHasL1",posHasL1,"posHasL1/I")
nevents = events.GetEntries()
for entry in xrange(nevents):
events.GetEntry(entry)
removehitEle = False
removehitPos = False
if(events.eleHasL1):
removehitEle = RemoveHit(events.eleTrkLambda)
if(events.posHasL1):
removehitPos = RemoveHit(events.posTrkLambda)
if((removehitEle and events.eleNTrackHits == 5) or (removehitPos and events.posNTrackHits == 5)):
continue
elif(not removehitEle and not removehitPos):
events2.Fill()
elif((removehitEle and events.eleNTrackHits == 6) or (removehitPos and events.posNTrackHits == 6)):
events3.Fill()
else:
print("You missed a category for L1L1")
events1.AutoSave()
events2.AutoSave()
events3.AutoSave()
del file
return events2, events3
def KillHits(events1, events2, events3, mass, inputL2L2ReconFile,outfile):
newevents1, newevents2_L1L1, newevents3_L1L1 = NewEventsL1L1(events1,mass,outfile)
newevents2_L1L2, newevents3_L1L2 = NewEventsL1L2(events2, mass,outfile)
newevents1 = TChain("ntuple_L1L1")
newevents1.Add("dumL1L1_{0:0.0f}_{1}.root".format(mass*1000,outfile))
newevents1.Add("dumL1L2_{0:0.0f}_{1}.root".format(mass*1000,outfile))
newevents2 = TChain("ntuple_L1L2")
newevents2.Add("dumL1L1_{0:0.0f}_{1}.root".format(mass*1000,outfile))
newevents2.Add("dumL1L2_{0:0.0f}_{1}.root".format(mass*1000,outfile))
file = TFile("dumL2L2_{0:0.0f}_{1}.root".format(mass*1000,outfile),"recreate")
eventsL2L2 = events3.CloneTree(0)
eventsL2L2.SetName("ntuple_L2L2")
nevents = events3.GetEntries()
for entry in xrange(nevents):
events3.GetEntry(entry)
eventsL2L2.Fill()
eventsL2L2.AutoSave()
newevents3 = TChain("ntuple_L2L2")
newevents3.Add("dumL1L1_{0:0.0f}_{1}.root".format(mass*1000,outfile))
newevents3.Add("dumL1L2_{0:0.0f}_{1}.root".format(mass*1000,outfile))
newevents3.Add("dumL2L2_{0:0.0f}_{1}.root".format(mass*1000,outfile))
del file
return newevents1, newevents2, newevents3
def openPDF(outfile,canvas):
canvas.Print(outfile+".pdf[")
def closePDF(outfile,canvas):
canvas.Print(outfile+".pdf]")
def CompareHisto(events1,events2,truthevents,nBins,targZ,maxZ,outfileroot,canvas,outfile,mass,title=""):
outfileroot.cd()
canvas.Clear()
events1.Draw("triEndZ>>histo1({0},{1},{2})".format(nBins,targZ,maxZ))
histo1 = ROOT.gROOT.FindObject("histo1")
events2.Draw("triEndZ>>histo2({0},{1},{2})".format(nBins,targZ,maxZ))
histo2 = ROOT.gROOT.FindObject("histo2")
truthevents.Draw("triEndZ>>truthhisto({0},{1},{2})".format(nBins,targZ,maxZ))
truthhisto = ROOT.gROOT.FindObject("truthhisto")
histo1.Sumw2()
histo2.Sumw2()
truthhisto.Sumw2()
histo1.Divide(truthhisto)
histo2.Divide(truthhisto)
histo1.Draw()
histo1.SetStats(0)
histo1.SetTitle("Compare Hit Killing {0:0.0f} MeV A' for {1}".format(mass*1000,title))
histo1.GetXaxis().SetTitle("Truth z (mm)")
histo1.GetYaxis().SetTitle("efficiency")
histo2.SetLineColor(2)
histo2.Draw("same")
legend = TLegend(.58,.66,.92,.87)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetTextFont(42)
legend.SetTextSize(0.035)
legend.AddEntry(histo1,"No L1 Hit Killing","LP")
legend.AddEntry(histo2,"With L1 Hit Killing","LP")
legend.Draw("same")
canvas.Print(outfile+".pdf")
canvas.Write()
def CompareKill(L1L1events,L1L1killevents,L1L2events,L1L2killevents,L2L2events,L2L2killevents,truthevents,nBins,targZ,outfileroot,canvas,outfile,mass):
output = outfile+"_comparekill"
CompareHisto(L1L1events,L1L1killevents,truthevents,nBins,targZ,80,outfileroot,canvas,output,mass,"L1L1")
CompareHisto(L1L2events,L1L2killevents,truthevents,nBins,targZ,90,outfileroot,canvas,output,mass,"L1L2")
CompareHisto(L2L2events,L2L2killevents,truthevents,nBins,targZ,160,outfileroot,canvas,output,mass,"L2L2")
#removedL1Hit={} #dictionary...for c++/java you can use a map or something...
#for fsp in <fspInV0> :
# if isMC and trackKiller and killInTrackSlope :
# track=fsp.getTracks()[0]
# nHits=len(track.getSvtHits())
# slp=track.getTanLambda()
# rndm=random.random()
# ibin=effSlopeData.FindBin(slp)
# eff=1-effSlopeData.GetBinContent(ibin) #the slope "efficiency" is actually an inefficiency
# if rndm>eff:
# if nHits==5:
# print(str(fsp)+':: Removing this particle due to L1 inefficiency')
# nKilled+=1
# continue
# else :
# print(str(fsp)+':: Removing this particle L1 hit due to inefficiency')
# removedL1Hit[fsp]=True
# else:
# print(str(fsp)+':: Leaving this particle alone')
# removedL1Hit[fsp]=False
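#A runnable version of the per-particle sketch in the comment block above, kept only for reference.
#It assumes, as that sketch does, MC with the track killer enabled, final-state particles exposing
#getTracks(), tracks exposing getSvtHits() and getTanLambda(), and the effSlopeData inefficiency
#histogram loaded earlier; the tuple-based workflow in this script does not call it.
def killL1HitsByParticle(fspInV0, effSlopeData):
	removedL1Hit = {}
	nKilled = 0
	for fsp in fspInV0:
		track = fsp.getTracks()[0]
		nHits = len(track.getSvtHits())
		slp = track.getTanLambda()
		eff = 1 - effSlopeData.GetBinContent(effSlopeData.FindBin(slp)) #the slope "efficiency" is actually an inefficiency
		if random.random() > eff:
			if nHits == 5:
				nKilled += 1 #a 5-hit track that loses its L1 hit removes the whole particle
				continue
			removedL1Hit[fsp] = True #a 6-hit track just loses its L1 hit
		else:
			removedL1Hit[fsp] = False
	return removedL1Hit, nKilled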
gROOT.SetBatch(True)
c = TCanvas("c","c",1200,900)
maxZ = targZ + zRange #Define Maximum Z
#Set outfile and grab infile
outfile = remainder[0]
outfileroot = TFile(remainder[0]+"_all.root","RECREATE")
L1L1file = open(remainder[1],"r")
L1L2file = open(remainder[2],"r")
L2L2file = open(remainder[3],"r")
truthfile = open(remainder[4],"r")
L1L1Files = []
L1L2Files = []
L2L2Files = []
truthFiles = []
histosgamma = []
#Read files from L1L1 input text file
for line in (raw.strip().split() for raw in L1L1file):
L1L1Files.append(line[0])
#Read files from L1L2 input text file
for line in (raw.strip().split() for raw in L1L2file):
L1L2Files.append(line[0])
#Read files from L2L2 input text file
for line in (raw.strip().split() for raw in L2L2file):
L2L2Files.append(line[0])
#Read files from input text truth file
for line in (raw.strip().split() for raw in truthfile):
truthFiles.append(line[0])
if (len(truthFiles) != len(L1L1Files) or len(truthFiles) != len(L1L2Files) or len(truthFiles) != len(L2L2Files)):
print "The number of L1L1 files, input files, or truth files do not match!"
print_usage()
sys.exit(0)
mass = array.array('d')
z = array.array('d')
nMass = len(truthFiles)
#Grab values of mass from the truth in the tuple files
for i in range(nMass):
inputTruthFile = TFile(str(truthFiles[i]))
inputTruthFile.Get(tupleName).Draw("triM>>histoMass({0},{1},{2})".format(1000,0,1))
histoMass = ROOT.gROOT.FindObject("histoMass")
mass.append(histoMass.GetMean())
del histoMass
#Build array of z values
for i in range(nBins):
z.append(targZ+i*(maxZ-targZ)/float(nBins))
#Function used to fit the summed efficiency for normalization: exp(polN) or a plain polynomial, over z from -5 to 50 mm or -5 to 100 mm, selected by the -f fit type option
if(fittype == 0):
exppol4=TF1("exppol4","exp(pol2(0))",-5,50)
elif(fittype == 1):
exppol4=TF1("exppol4","exp(pol2(0))",-5,100)
elif(fittype == 2):
exppol4=TF1("exppol4","exp(pol3(0))",-5,50)
elif(fittype == 3):
exppol4=TF1("exppol4","exp(pol3(0))",-5,100)
elif(fittype == 4):
exppol4=TF1("exppol4","exp(pol4(0))",-5,50)
elif(fittype == 5):
exppol4=TF1("exppol4","exp(pol4(0))",-5,100)
elif(fittype == 6):
exppol4=TF1("exppol4","pol3",-5,50)
elif(fittype == 7):
exppol4=TF1("exppol4","pol3",-5,100)
elif(fittype == 8):
exppol4=TF1("exppol4","pol4",-5,50)
elif(fittype == 9):
exppol4=TF1("exppol4","pol4",-5,100)
elif(fittype == 10):
exppol4=TF1("exppol4","pol5",-5,50)
elif(fittype == 11):
exppol4=TF1("exppol4","pol5",-5,100)
elif(fittype == 12):
exppol4=TF1("exppol4","pol6",-5,50)
else:
exppol4=TF1("exppol4","pol6",-5,100)
#Create text files to write to
textfileL1L1 = open(outfile + "_L1L1.eff","w")
textfileL1L1Norm = open(outfile + "_L1L1_norm.eff","w")
textfileL1L1Killed = open(outfile + "_L1L1_kill.eff","w")
textfileL1L1KilledNorm = open(outfile + "_L1L1_kill_norm.eff","w")
textfileL1L2 = open(outfile + "_L1L2.eff","w")
textfileL1L2Norm = open(outfile + "_L1L2_norm.eff","w")
textfileL1L2Killed = open(outfile + "_L1L2_kill.eff","w")
textfileL1L2KilledNorm = open(outfile + "_L1L2_kill_norm.eff","w")
textfileL2L2 = open(outfile + "_L2L2.eff","w")
textfileL2L2Norm = open(outfile + "_L2L2_norm.eff","w")
textfileL2L2Killed = open(outfile + "_L2L2_kill.eff","w")
textfileL2L2KilledNorm = open(outfile + "_L2L2_kill_norm.eff","w")
#Write values of mass in the first row
for i in range(nMass):
textfileL1L1.write(str(mass[i]) + " ")
textfileL1L1Norm.write(str(mass[i]) + " ")
textfileL1L1Killed.write(str(mass[i]) + " ")
textfileL1L1KilledNorm.write(str(mass[i]) + " ")
textfileL1L2.write(str(mass[i]) + " ")
textfileL1L2Norm.write(str(mass[i]) + " ")
textfileL1L2Killed.write(str(mass[i]) + " ")
textfileL1L2KilledNorm.write(str(mass[i]) + " ")
textfileL2L2.write(str(mass[i]) + " ")
textfileL2L2Norm.write(str(mass[i]) + " ")
textfileL2L2Killed.write(str(mass[i]) + " ")
textfileL2L2KilledNorm.write(str(mass[i]) + " ")
textfileL1L1.write("\n")
textfileL1L1Norm.write("\n")
textfileL1L1Killed.write("\n")
textfileL1L1KilledNorm.write("\n")
textfileL1L2.write("\n")
textfileL1L2Norm.write("\n")
textfileL1L2Killed.write("\n")
textfileL1L2KilledNorm.write("\n")
textfileL2L2.write("\n")
textfileL2L2Norm.write("\n")
textfileL2L2Killed.write("\n")
textfileL2L2KilledNorm.write("\n")
#Write values of z in the 2nd row
for i in range(nBins):
textfileL1L1.write(str(z[i]) + " ")
textfileL1L1Norm.write(str(z[i]) + " ")
textfileL1L1Killed.write(str(z[i]) + " ")
textfileL1L1KilledNorm.write(str(z[i]) + " ")
textfileL1L2.write(str(z[i]) + " ")
textfileL1L2Norm.write(str(z[i]) + " ")
textfileL1L2Killed.write(str(z[i]) + " ")
textfileL1L2KilledNorm.write(str(z[i]) + " ")
textfileL2L2.write(str(z[i]) + " ")
textfileL2L2Norm.write(str(z[i]) + " ")
textfileL2L2Killed.write(str(z[i]) + " ")
textfileL2L2KilledNorm.write(str(z[i]) + " ")
textfileL1L1.write("\n")
textfileL1L1Norm.write("\n")
textfileL1L1Killed.write("\n")
textfileL1L1KilledNorm.write("\n")
textfileL1L2.write("\n")
textfileL1L2Norm.write("\n")
textfileL1L2Killed.write("\n")
textfileL1L2KilledNorm.write("\n")
textfileL2L2.write("\n")
textfileL2L2Norm.write("\n")
textfileL2L2Killed.write("\n")
textfileL2L2KilledNorm.write("\n")
L1L1events = []
L1L1killevents = []
L1L2events = []
L1L2killevents = []
L2L2events = []
L2L2killevents = []
eventstruth = []
histosL1L1 = []
histosL1L2 = []
histosL2L2 = []
histosTruth = []
normArr = array.array('d')
histosL1L1kill = []
histosL1L2kill = []
histosL2L2kill = []
normkillArr = array.array('d')
gammamean = array.array('d')
gammameanerror = array.array('d')
zeros = array.array('d')
openPDF(outfile+"_comparekill",c)
openPDF(outfile+"_fitplots",c)
#Loop over all values of mass
for i in range(nMass):
inputL1L1ReconFile = TFile(str(L1L1Files[i])) #L1L1 tuple files after cuts
inputL1L2ReconFile = TFile(str(L1L2Files[i])) #L1L2 tuple files after cuts
inputL2L2ReconFile = TFile(str(L2L2Files[i])) #L2L2 tuple files after cuts
inputTruthFile = TFile(str(truthFiles[i])) #truth files
#L1L1events.append(inputL1L1ReconFile.Get(tupleName))
#L1L2events.append(inputL1L2ReconFile.Get(tupleName))
#L2L2events.append(inputL2L2ReconFile.Get(tupleName))
L1L1events = inputL1L1ReconFile.Get(tupleName)
L1L2events = inputL1L2ReconFile.Get(tupleName)
L2L2events = inputL2L2ReconFile.Get(tupleName)
L1L1killevents, L1L2killevents, L2L2killevents = KillHits(inputL1L1ReconFile.Get(tupleName),inputL1L2ReconFile.Get(tupleName),inputL2L2ReconFile.Get(tupleName),mass[i],L2L2Files[i],outfile)
#L1L1killevents.append(eventsL1L1)
#L1L2killevents.append(eventsL1L2)
#L2L2killevents.append(eventsL2L2)
#L1L1killevents.append(L1L1events[i])
#L1L2killevents.append(L1L2events[i])
#L2L2killevents.append(L2L2events[i])
#L1L1killevents = eventsL1L1
#L1L2killevents = eventsL1L2
#L2L2killevents = eventsL2L2
#L1L1killevents = L1L1events
#L1L2killevents = L1L2events
#L2L2killevents = L2L2events
#eventstruth.append(inputTruthFile.Get(tupleName))
eventstruth = inputTruthFile.Get(tupleName)
CompareKill(L1L1events,L1L1killevents,L1L2events,L1L2killevents,L2L2events,L2L2killevents,eventstruth,nBins,targZ,outfileroot,c,outfile,mass[i])
#closePDF(outfile+"_comparekill",c)
#del eventsL1L1
#del eventsL1L2
#del eventsL2L2
#histosL1L1 = []
#histosL1L2 = []
#histosL2L2 = []
#histosTruth = []
#normArr = []
#for i in range(nMass):
print("Mass {0:0.0f}".format(mass[i]*1000))
L1L1events.Draw("triEndZ>>histoReconL1L1_{3:0.0f}({0},{1},{2})".format(nBins,targZ,maxZ,mass[i]*1000))
#histoReconL1L1 = ROOT.gROOT.FindObject("histoReconL1L1_{0:0.0f}".format(mass[i]*1000))
histosL1L1.append(ROOT.gROOT.FindObject("histoReconL1L1_{0:0.0f}".format(mass[i]*1000)))
L1L2events.Draw("triEndZ>>histoReconL1L2_{3:0.0f}({0},{1},{2})".format(nBins,targZ,maxZ,mass[i]*1000))
#histoReconL1L2 = ROOT.gROOT.FindObject("histoReconL1L2_{0:0.0f}".format(mass[i]*1000))
histosL1L2.append(ROOT.gROOT.FindObject("histoReconL1L2_{0:0.0f}".format(mass[i]*1000)))
L2L2events.Draw("triEndZ>>histoReconL2L2_{3:0.0f}({0},{1},{2})".format(nBins,targZ,maxZ,mass[i]*1000))
#histoReconL2L2 = ROOT.gROOT.FindObject("histoReconL2L2_{0:0.0f}".format(mass[i]*1000))
histosL2L2.append(ROOT.gROOT.FindObject("histoReconL2L2_{0:0.0f}".format(mass[i]*1000)))
eventstruth.Draw("triEndZ>>histoTruth_{3:0.0f}({0},{1},{2})".format(nBins,targZ,maxZ,mass[i]*1000),"triStartP>0.8*{0}".format(eBeam))
#histoTruth = ROOT.gROOT.FindObject("histoTruth".format(mass[i]*1000))
histosTruth.append(ROOT.gROOT.FindObject("histoTruth_{0:0.0f}".format(mass[i]*1000)))
#histoReconL1L1.Sumw2()
#histoReconL1L2.Sumw2()
#histoReconL2L2.Sumw2()
#histoTruth.Sumw2()
histosL1L1[i].Sumw2()
histosL1L2[i].Sumw2()
histosL2L2[i].Sumw2()
histosTruth[i].Sumw2()
outfileroot.cd()
histosL1L1[i].Write("L1L1 {0:0.0f} MeV".format(mass[i]*1000))
histosL1L2[i].Write("L1L2 {0:0.0f} MeV".format(mass[i]*1000))
histosL2L2[i].Write("L2L2 {0:0.0f} MeV".format(mass[i]*1000))
histosTruth[i].Write("Truth {0:0.0f} MeV".format(mass[i]*1000))
#Find the normalization based on a certain number of bins
norm = plotFit(histosL1L1[i],histosL1L2[i],histosL2L2[i],histosTruth[i],normArr,outfile+"_fitplots",outfileroot,c,mass[i],targZ,title="Without Hit Killing")
#norm = 0.0
#for j in range(nNorm):
#if (histoTruth.GetBinContent(j+1) != 0):
# norm += histoReconL1L1.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)
# if (histosTruth[i].GetBinContent(j+1) != 0):
# norm += histosL1L1[i].GetBinContent(j+1)/histosTruth[i].GetBinContent(j+1)
# else:
# norm = 0.0
# break
#norm = norm/nNorm
	print("Normalization without hit killing: {0}".format(norm))
normArr.append(norm)
#Write the efficiency for a given mass (row) as function of z
for j in range(nBins):
if (histosTruth[i].GetBinContent(j+1) == 0):
textfileL1L1.write("0.0 ")
textfileL1L1Norm.write("0.0 ")
textfileL1L2.write("0.0 ")
textfileL1L2Norm.write("0.0 ")
textfileL2L2.write("0.0 ")
textfileL2L2Norm.write("0.0 ")
else:
#textfileL1L1.write(str(histoReconL1L1.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)) + " ")
#textfileL1L2.write(str(histoReconL1L2.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)) + " ")
#textfileL2L2.write(str(histoReconL2L2.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)) + " ")
textfileL1L1.write(str(histosL1L1[i].GetBinContent(j+1)/histosTruth[i].GetBinContent(j+1)) + " ")
textfileL1L2.write(str(histosL1L2[i].GetBinContent(j+1)/histosTruth[i].GetBinContent(j+1)) + " ")
textfileL2L2.write(str(histosL2L2[i].GetBinContent(j+1)/histosTruth[i].GetBinContent(j+1)) + " ")
if(norm != 0):
#textfileL1L1Norm.write(str(histoReconL1L1.GetBinContent(j+1)/(histoTruth.GetBinContent(j+1)*norm)) + " ")
#textfileL1L2Norm.write(str(histoReconL1L2.GetBinContent(j+1)/(histoTruth.GetBinContent(j+1)*norm)) + " ")
#textfileL2L2Norm.write(str(histoReconL2L2.GetBinContent(j+1)/(histoTruth.GetBinContent(j+1)*norm)) + " ")
textfileL1L1Norm.write(str(histosL1L1[i].GetBinContent(j+1)/(histosTruth[i].GetBinContent(j+1)*norm)) + " ")
textfileL1L2Norm.write(str(histosL1L2[i].GetBinContent(j+1)/(histosTruth[i].GetBinContent(j+1)*norm)) + " ")
textfileL2L2Norm.write(str(histosL2L2[i].GetBinContent(j+1)/(histosTruth[i].GetBinContent(j+1)*norm)) + " ")
else:
textfileL1L1Norm.write("0.0 ")
textfileL1L2Norm.write("0.0 ")
textfileL2L2Norm.write("0.0 ")
textfileL1L1.write("\n")
textfileL1L1Norm.write("\n")
textfileL1L2.write("\n")
textfileL1L2Norm.write("\n")
textfileL2L2.write("\n")
textfileL2L2Norm.write("\n")
L1L1events.Draw("triStartP/({4})>>gammahisto_{3:0.0f}({0},{1},{2})".format(nBins,0.8,1.,mass[i]*1000,eBeam))
histosgamma.append(ROOT.gROOT.FindObject("gammahisto_{0:0.0f}".format(mass[i]*1000)))
gammamean.append(histosgamma[i].GetMean())
print(histosgamma[i].GetMean())
gammameanerror.append(histosgamma[i].GetMeanError())
zeros.append(0.)
#textfileL1L1.close()
#textfileL1L1Norm.close()
#textfileL1L2.close()
#textfileL1L2Norm.close()
#textfileL2L2.close()
#textfileL2L2Norm.close()
#histosL1L1kill = []
#histosL1L2kill = []
#histosL2L2kill = []
#normkillArr = []
#for i in range(nMass):
L1L1killevents.Draw("triEndZ>>histoReconL1L1_kill_{3:0.0f}({0},{1},{2})".format(nBins,targZ,maxZ,mass[i]*1000))
#histoReconL1L1 = ROOT.gROOT.FindObject("histoReconL1L1")
histosL1L1kill.append(ROOT.gROOT.FindObject("histoReconL1L1_kill_{0:0.0f}".format(mass[i]*1000)))
L1L2killevents.Draw("triEndZ>>histoReconL1L2_kill_{3:0.0f}({0},{1},{2})".format(nBins,targZ,maxZ,mass[i]*1000))
#histoReconL1L2 = ROOT.gROOT.FindObject("histoReconL1L2")
histosL1L2kill.append(ROOT.gROOT.FindObject("histoReconL1L2_kill_{0:0.0f}".format(mass[i]*1000)))
L2L2killevents.Draw("triEndZ>>histoReconL2L2_kill_{3:0.0f}({0},{1},{2})".format(nBins,targZ,maxZ,mass[i]*1000))
#histoReconL2L2 = ROOT.gROOT.FindObject("histoReconL2L2")
histosL2L2kill.append(ROOT.gROOT.FindObject("histoReconL2L2_kill_{0:0.0f}".format(mass[i]*1000)))
#eventstruth[i].Draw("triEndZ>>histoTruth({0},{1},{2})".format(nBins,targZ,maxZ),"triStartP>0.8*{0}".format(eBeam))
#histoTruth = ROOT.gROOT.FindObject("histoTruth")
#histoReconL1L1.Sumw2()
#histoReconL1L2.Sumw2()
#histoReconL2L2.Sumw2()
#histoTruth.Sumw2()
histosL1L1kill[i].Sumw2()
histosL1L2kill[i].Sumw2()
histosL2L2kill[i].Sumw2()
outfileroot.cd()
histosL1L1kill[i].Write("L1L1 {0:0.0f} MeV Hit Killed".format(mass[i]*1000))
histosL1L2kill[i].Write("L1L2 {0:0.0f} MeV Hit Killed".format(mass[i]*1000))
histosL2L2kill[i].Write("L2L2 {0:0.0f} MeV Hit Killed".format(mass[i]*1000))
#Find the normalization based on a certain number of bins
normKill = plotFit(histosL1L1kill[i],histosL1L2kill[i],histosL2L2kill[i],histosTruth[i],normArr,outfile+"_fitplots",outfileroot,c,mass[i],targZ,title="With Hit Killing")
#norm = 0.0
#for j in range(nNorm):
#if (histoTruth.GetBinContent(j+1) != 0):
# norm += histoReconL1L1.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)
# if (histosTruth[i].GetBinContent(j+1) != 0):
# norm += (histosL1L1kill[i].GetBinContent(j+1)+histosL1L2kill[i].GetBinContent(j+1)+histosL2L2kill[i].GetBinContent(j+1))/histosTruth[i].GetBinContent(j+1)
# else:
# norm = 0.0
# break
#norm = norm/nNorm
	print("Normalization with hit killing: {0}".format(normKill))
normkillArr.append(normKill)
#Write the efficiency for a given mass (row) as function of z
for j in range(nBins):
if (histosTruth[i].GetBinContent(j+1) == 0):
textfileL1L1Killed.write("0.0 ")
textfileL1L1KilledNorm.write("0.0 ")
textfileL1L2Killed.write("0.0 ")
textfileL1L2KilledNorm.write("0.0 ")
textfileL2L2Killed.write("0.0 ")
textfileL2L2KilledNorm.write("0.0 ")
else:
#textfileL1L1Killed.write(str(histoReconL1L1.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)) + " ")
#textfileL1L2Killed.write(str(histoReconL1L2.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)) + " ")
#textfileL2L2Killed.write(str(histoReconL2L2.GetBinContent(j+1)/histoTruth.GetBinContent(j+1)) + " ")
textfileL1L1Killed.write(str(histosL1L1kill[i].GetBinContent(j+1)/histosTruth[i].GetBinContent(j+1)) + " ")
textfileL1L2Killed.write(str(histosL1L2kill[i].GetBinContent(j+1)/histosTruth[i].GetBinContent(j+1)) + " ")
textfileL2L2Killed.write(str(histosL2L2kill[i].GetBinContent(j+1)/histosTruth[i].GetBinContent(j+1)) + " ")
			if(normKill != 0):
				#textfileL1L1KilledNorm.write(str(histoReconL1L1.GetBinContent(j+1)/(histoTruth.GetBinContent(j+1)*norm)) + " ")
				#textfileL1L2KilledNorm.write(str(histoReconL1L2.GetBinContent(j+1)/(histoTruth.GetBinContent(j+1)*norm)) + " ")
				#textfileL2L2KilledNorm.write(str(histoReconL2L2.GetBinContent(j+1)/(histoTruth.GetBinContent(j+1)*norm)) + " ")
				textfileL1L1KilledNorm.write(str(histosL1L1kill[i].GetBinContent(j+1)/(histosTruth[i].GetBinContent(j+1)*normKill)) + " ")
				textfileL1L2KilledNorm.write(str(histosL1L2kill[i].GetBinContent(j+1)/(histosTruth[i].GetBinContent(j+1)*normKill)) + " ")
				textfileL2L2KilledNorm.write(str(histosL2L2kill[i].GetBinContent(j+1)/(histosTruth[i].GetBinContent(j+1)*normKill)) + " ")
else:
textfileL1L1KilledNorm.write("0.0 ")
textfileL1L2KilledNorm.write("0.0 ")
textfileL2L2KilledNorm.write("0.0 ")
textfileL1L1Killed.write("\n")
textfileL1L1KilledNorm.write("\n")
textfileL1L2Killed.write("\n")
textfileL1L2KilledNorm.write("\n")
textfileL2L2Killed.write("\n")
textfileL2L2KilledNorm.write("\n")
del L1L1events
del L1L2events
del L2L2events
del L1L1killevents
del L1L2killevents
del L2L2killevents
del eventstruth
del inputL1L1ReconFile
del inputL1L2ReconFile
del inputL2L2ReconFile
del inputTruthFile
textfileL1L1.close()
textfileL1L1Norm.close()
textfileL1L2.close()
textfileL1L2Norm.close()
textfileL2L2.close()
textfileL2L2Norm.close()
textfileL1L1Killed.close()
textfileL1L1KilledNorm.close()
textfileL1L2Killed.close()
textfileL1L2KilledNorm.close()
textfileL2L2Killed.close()
textfileL2L2KilledNorm.close()
closePDF(outfile+"_comparekill",c)
closePDF(outfile+"_fitplots",c)
#Make test plots if desired
if(makeTestPlots):
#Make Absolute Efficiency Plots
c1 = TCanvas("c","c",1200,900)
c1.Print(outfile+"_L1L1.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L1.eff",outfile,targZ,maxZ,c1)
c1.Print(outfile+"_L1L1.pdf]")
del c1
#Make Normalized Efficiency Plots
c2 = TCanvas("c","c",1200,900)
c2.Print(outfile+"_L1L1_norm.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L1_norm.eff",outfileNorm,targZ,maxZ,c2)
c2.Print(outfile+"_L1L1_norm.pdf]")
del c2
#Make Absolute Efficiency Plots
c3 = TCanvas("c","c",1200,900)
c3.Print(outfile+"_L1L1_kill.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L1_kill.eff",outfile,targZ,maxZ,c3)
c3.Print(outfile+"_L1L1_kill.pdf]")
del c3
#Make Normalized Efficiency Plots
c4 = TCanvas("c","c",1200,900)
c4.Print(outfile+"_L1L1_kill_norm.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L1_kill_norm.eff",outfileNorm,targZ,maxZ,c4)
c4.Print(outfile+"_L1L1_kill_norm.pdf]")
del c4
#Make Absolute Efficiency Plots
c5 = TCanvas("c","c",1200,900)
c5.Print(outfile+"_L1L2.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L2.eff",outfile,targZ,maxZ,c5)
c5.Print(outfile+"_L1L2.pdf]")
del c5
#Make Normalized Efficiency Plots
c6 = TCanvas("c","c",1200,900)
c6.Print(outfile+"_L1L2_norm.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L2_norm.eff",outfileNorm,targZ,maxZ,c6)
c6.Print(outfile+"_L1L2_norm.pdf]")
del c6
#Make Absolute Efficiency Plots
c7 = TCanvas("c","c",1200,900)
c7.Print(outfile+"_L1L2_kill.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L2_kill.eff",outfile,targZ,maxZ,c7)
c7.Print(outfile+"_L1L2_kill.pdf]")
del c7
#Make Normalized Efficiency Plots
c8 = TCanvas("c","c",1200,900)
c8.Print(outfile+"_L1L2_kill_norm.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L1L2_kill_norm.eff",outfileNorm,targZ,maxZ,c8)
c8.Print(outfile+"_L1L2_kill_norm.pdf]")
del c8
#Make Absolute Efficiency Plots
c9 = TCanvas("c","c",1200,900)
c9.Print(outfile+"_L2L2.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L2L2.eff",outfile,targZ,maxZ,c9)
c9.Print(outfile+"_L2L2.pdf]")
del c9
#Make Normalized Efficiency Plots
c10 = TCanvas("c","c",1200,900)
c10.Print(outfile+"_L2L2_norm.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L2L2_norm.eff",outfileNorm,targZ,maxZ,c10)
c10.Print(outfile+"_L2L2_norm.pdf]")
del c10
#Make Absolute Efficiency Plots
c11 = TCanvas("c","c",1200,900)
c11.Print(outfile+"_L2L2_kill.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L2L2_kill.eff",outfile,targZ,maxZ,c11)
c11.Print(outfile+"_L2L2_kill.pdf]")
del c11
#Make Normalized Efficiency Plots
c12 = TCanvas("c","c",1200,900)
c12.Print(outfile+"_L2L2_kill_norm.pdf[")
for i in range(1,nMass-1):
plotTest(i,outfile+"_L2L2_kill_norm.eff",outfileNorm,targZ,maxZ,c12)
c12.Print(outfile+"_L2L2_kill_norm.pdf]")
del c12
c13 = TCanvas("c","c",1200,900)
#plotEff(outfile+"_L1L1.eff",outfile+"_L1L1",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L1L1_norm.eff",outfile+"_L1L1_norm",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L1L1_kill.eff",outfile+"_L1L1_kill",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L1L1_kill_norm.eff",outfile+"_L1L1_kill_norm",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L1L2.eff",outfile+"_L1L2",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L1L2_norm.eff",outfile+"_L1L2_norm",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L1L2_kill.eff",outfile+"_L1L2_kill",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L1L2_kill_norm.eff",outfile+"_L1L2_kill_norm",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L2L2.eff",outfile+"_L2L2",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L2L2_norm.eff",outfile+"_L2L2_norm",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L2L2_kill.eff",outfile+"_L2L2_kill",nBins,targZ,maxZ,c13)
#plotEff(outfile+"_L2L2_kill_norm.eff",outfile+"_L2L2_kill_norm",nBins,targZ,maxZ,c13)
c13.Print(outfile+"_plots.pdf[")
plotEff2(histosL1L1,histosTruth,normArr,outfile+"_L1L1",outfile+"_plots",outfileroot,c13,mass,False,title="L1L1")
plotEff2(histosL1L1,histosTruth,normArr,outfile+"_L1L1_norm",outfile+"_plots",outfileroot,c13,mass,True,title="L1L1 Normalized")
plotEff2(histosL1L1kill,histosTruth,normkillArr,outfile+"_L1L1_kill",outfile+"_plots",outfileroot,c13,mass,False,title="L1L1 Hit Killed")
plotEff2(histosL1L1kill,histosTruth,normkillArr,outfile+"_L1L1_kill_norm",outfile+"_plots",outfileroot,c13,mass,True,title="L1L1 Normalized Hit Killed")
plotEff2(histosL1L2,histosTruth,normArr,outfile+"_L1L2",outfile+"_plots",outfileroot,c13,mass,False,title="L1L2")
plotEff2(histosL1L2,histosTruth,normArr,outfile+"_L1L2_norm",outfile+"_plots",outfileroot,c13,mass,True,title="L2L2 Normalized")
plotEff2(histosL1L2kill,histosTruth,normkillArr,outfile+"_L1L2_kill",outfile+"_plots",outfileroot,c13,mass,False,title="L1L2 Hit Killed")
plotEff2(histosL1L2kill,histosTruth,normkillArr,outfile+"_L1L2_kill_norm",outfile+"_plots",outfileroot,c13,mass,True,title="L1L2 Normalized Hit Killed")
plotEff2(histosL2L2,histosTruth,normArr,outfile+"_L2L2",outfile+"_plots",outfileroot,c13,mass,False,title="L2L2")
plotEff2(histosL2L2,histosTruth,normArr,outfile+"_L2L2_norm",outfile+"_plots",outfileroot,c13,mass,True,title="L2L2 Normalized")
plotEff2(histosL2L2kill,histosTruth,normkillArr,outfile+"_L2L2_kill",outfile+"_plots",outfileroot,c13,mass,False,title="L2L2 Hit Killed")
plotEff2(histosL2L2kill,histosTruth,normkillArr,outfile+"_L2L2_kill_norm",outfile+"_plots",outfileroot,c13,mass,True,title="L2L2 Normalized Hit Killed")
plotAll(histosL1L1,histosL1L2,histosL2L2,histosTruth,normArr,outfile+"_all",outfile+"_plots",outfileroot,c13,mass)
plotAll(histosL1L1kill,histosL1L2kill,histosL2L2kill,histosTruth,normkillArr,outfile+"_kill_all",outfile+"_plots",outfileroot,c13,mass,title="With Hit Killing")
passed.Sumw2()
total.Sumw2()
passed.Divide(total)
passed.SetTitle("L1 Hit Efficiency")
passed.GetXaxis().SetTitle("Track Slope")
passed.GetYaxis().SetTitle("Efficiency")
passed.SetStats(0)
passed.Draw()
c13.Print(outfile+"_plots.pdf")
passed.Write("Efficiency")
graph = TGraphErrors(len(mass),mass,gammamean,zeros,gammameanerror)
graph.SetTitle("A' Truth Energy / E_{beam}")
graph.GetXaxis().SetTitle("Truth Mass (GeV)")
graph.GetYaxis().SetTitle("Fraction of E_{beam}")
graph.GetXaxis().SetRangeUser(0,.2)
graph.GetYaxis().SetRangeUser(0.9,1.0)
graph.Draw("AP")
c13.Print(outfile+"_plots.pdf")
graph.Write("Gamma")
def MakeGammaHistos(histo,mass,canvas,output):
histo.SetTitle("{0:0.0f}".format(mass) + " MeV A' Truth Energy / E_{beam}")
histo.GetXaxis().SetTitle("Truth Energy /E_{beam}")
histo.GetYaxis().SetTitle("")
histo.Sumw2()
histo.SetStats(0)
histo.Draw()
canvas.Print(output+".pdf")
histo.Write("{0} MeV A' Energy".format(mass))
for i in range(len(mass)):
MakeGammaHistos(histosgamma[i],mass[i]*1000,c13,outfile+"_plots")
graph = TGraph(len(mass),mass,normArr)
graph2 = TGraph(len(mass),mass,normkillArr)
graph.SetTitle("Prompt A' Acceptance * Efficiency")
graph.GetXaxis().SetTitle("Truth Mass (GeV)")
graph.GetYaxis().SetTitle("Efficiency")
graph.GetXaxis().SetRangeUser(0,.2)
graph.GetYaxis().SetRangeUser(0,0.4)
graph.SetLineColor(1)
graph.SetMarkerColor(1)
graph2.SetLineColor(2)
graph2.SetMarkerColor(2)
graph.Draw("AP*")
graph2.Draw("P* same")
legend = TLegend(.58,.66,.92,.87)
legend.SetBorderSize(0)
legend.SetFillColor(0)
legend.SetFillStyle(0)
legend.SetTextFont(42)
legend.SetTextSize(0.035)
legend.AddEntry(graph,"No L1 Hit Killing","LP")
legend.AddEntry(graph2,"With L1 Hit Killing","LP")
legend.Draw("same")
c13.Print(outfile+"_plots.pdf")
graph.Write("Prompt Acceptance")
graph2.Write("Prompt Acceptance With Hit Killing")
c13.Print(outfile+"_plots.pdf]")
outfileroot.Close()
|
[
"mrsolt@stanford.edu"
] |
mrsolt@stanford.edu
|
836f316ea7373f660f670c441dd647f0fbff730c
|
6eb9078ce34bed9c895b821aae30f97bcc50ea7d
|
/前端第二课:Django/muggle/blog/admin.py
|
1eb06d37c9dd952bfd268205ff4916ceb34a5fa1
|
[] |
no_license
|
yinsendemogui/Atomspace
|
e7b9e24f8e541f57bdbae2e4d935a3b67133bc69
|
1053d7e3e71365f6acca99431c2d4295243d3df1
|
refs/heads/master
| 2020-06-11T19:24:53.123006
| 2016-12-19T16:53:51
| 2016-12-19T16:53:51
| 75,628,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.contrib import admin
from blog.models import UserProfile, Topic, Question, Answer, Comment,Ticket
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
# Register your models here.
class UserProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'UserProfile'
class TicketInline(admin.StackedInline):
model = Ticket
can_delete = False
verbose_name_plural = 'Ticket'
class UserAdmin(BaseUserAdmin):
inlines = (UserProfileInline,TicketInline )
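# Swapping in the UserAdmin defined above (unregister the default, then register ours) is what makes
# UserProfile and Ticket editable inline on each User's change page.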
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(UserProfile)
admin.site.register(Topic)
admin.site.register(Question)
admin.site.register(Answer)
admin.site.register(Comment)
admin.site.register(Ticket)
# Superuser account / password: Admin/Admin123456
|
[
"215379068@qq.com"
] |
215379068@qq.com
|
de9fb2ae97800642eda88142b72a772c8c9c47c5
|
2b14ef0b8c086a2dd047f6fab6f565f27c3634c6
|
/BOJ/브루트포스/호석이두마리치킨.py
|
48feda541fda601099cc09a058365b61117ed8f8
|
[] |
no_license
|
psj8532/problem_solving
|
055475bbdc8902ed4d19fd242d95dff461cc9608
|
8ae06fc935c3d0a3c5ec537f13677b0534869df3
|
refs/heads/master
| 2023-06-09T10:16:01.248293
| 2021-05-07T03:09:38
| 2021-05-07T03:09:38
| 240,618,744
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
from itertools import combinations
from collections import deque
def bfs(here, depth, chicken_house):
if here in chicken_house: return 0
deq = deque([[here, depth]])
visited = [False] * (N + 1)
visited[here] = True
while deq:
h, d = deq.popleft()
if h in chicken_house:
return 2 * d
for next in adj[h]:
if not visited[next]:
visited[next] = True
deq.append([next, d + 1])
CHICKEN_CNT = 2
answer = [0,0,9876543210]
N, M = map(int,input().split())
adj = {i:[] for i in range(N+1)}
for _ in range(M):
start, end = map(int,input().split())
adj[start].append(end)
adj[end].append(start)
perm = list(map(list, combinations(range(1, N + 1), CHICKEN_CNT)))
for chicken_list in perm:
chicken_house = sorted(chicken_list)
distance = 0
for house in range(1, N + 1):
distance += bfs(house, 0 , chicken_house)
if distance == answer[2] and chicken_house[0] <= answer[0]:
if (chicken_house[0] == answer[0] and chicken_house[1] < answer[1]) or (chicken_house[0] < answer[0]):
answer[0], answer[1] = chicken_house[0], chicken_house[1]
elif distance < answer[2]:
answer[0], answer[1], answer[2] = chicken_house[0], chicken_house[1], distance
print(*answer)
|
[
"psj8532@naver.com"
] |
psj8532@naver.com
|
e719c824c41b673746fdbfe99ee5d27001eb7e45
|
5f92dd6164c41e5756842da0a053b207005be252
|
/tests/models/test_dagcode.py
|
6f3c5d64cdf66aa29dc7df109dcd370d4f0fa3d5
|
[
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
mans2singh/airflow
|
24d17446457ebfbf17850d336722f79e46b06404
|
de9633f93a366ebc0a46d1ec4df2c4aa9a18357d
|
refs/heads/main
| 2023-03-20T17:56:18.506101
| 2022-10-22T19:41:57
| 2022-10-22T19:41:57
| 168,956,212
| 0
| 0
|
Apache-2.0
| 2019-02-03T14:51:24
| 2019-02-03T14:51:23
| null |
UTF-8
|
Python
| false
| false
| 6,409
|
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from datetime import timedelta
from unittest.mock import patch
import pytest
from airflow import AirflowException, example_dags as example_dags_module
from airflow.models import DagBag
from airflow.models.dagcode import DagCode
# TODO: move this to a shared module.
from airflow.utils.file import open_maybe_zipped
from airflow.utils.session import create_session
from tests.test_utils.db import clear_db_dag_code
def make_example_dags(module):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module.__path__[0])
return dagbag.dags
class TestDagCode:
"""Unit tests for DagCode."""
def setup_method(self):
clear_db_dag_code()
def teardown_method(self):
clear_db_dag_code()
def _write_two_example_dags(self):
example_dags = make_example_dags(example_dags_module)
bash_dag = example_dags['example_bash_operator']
DagCode(bash_dag.fileloc).sync_to_db()
xcom_dag = example_dags['example_xcom']
DagCode(xcom_dag.fileloc).sync_to_db()
return [bash_dag, xcom_dag]
def _write_example_dags(self):
example_dags = make_example_dags(example_dags_module)
for dag in example_dags.values():
dag.sync_to_db()
return example_dags
def test_sync_to_db(self):
"""Dg code can be written into database."""
example_dags = self._write_example_dags()
self._compare_example_dags(example_dags)
def test_bulk_sync_to_db(self):
"""Dg code can be bulk written into database."""
example_dags = make_example_dags(example_dags_module)
files = [dag.fileloc for dag in example_dags.values()]
with create_session() as session:
DagCode.bulk_sync_to_db(files, session=session)
session.commit()
self._compare_example_dags(example_dags)
def test_bulk_sync_to_db_half_files(self):
"""Dg code can be bulk written into database."""
example_dags = make_example_dags(example_dags_module)
files = [dag.fileloc for dag in example_dags.values()]
half_files = files[: int(len(files) / 2)]
with create_session() as session:
DagCode.bulk_sync_to_db(half_files, session=session)
session.commit()
with create_session() as session:
DagCode.bulk_sync_to_db(files, session=session)
session.commit()
self._compare_example_dags(example_dags)
@patch.object(DagCode, 'dag_fileloc_hash')
def test_detecting_duplicate_key(self, mock_hash):
"""Dag code detects duplicate key."""
mock_hash.return_value = 0
with pytest.raises(AirflowException):
self._write_two_example_dags()
def _compare_example_dags(self, example_dags):
with create_session() as session:
for dag in example_dags.values():
if dag.is_subdag:
dag.fileloc = dag.parent_dag.fileloc
assert DagCode.has_dag(dag.fileloc)
dag_fileloc_hash = DagCode.dag_fileloc_hash(dag.fileloc)
result = (
session.query(DagCode.fileloc, DagCode.fileloc_hash, DagCode.source_code)
.filter(DagCode.fileloc == dag.fileloc)
.filter(DagCode.fileloc_hash == dag_fileloc_hash)
.one()
)
assert result.fileloc == dag.fileloc
with open_maybe_zipped(dag.fileloc, 'r') as source:
source_code = source.read()
assert result.source_code == source_code
def test_code_can_be_read_when_no_access_to_file(self):
"""
Test that code can be retrieved from DB when you do not have access to Code file.
Source Code should at least exist in one of DB or File.
"""
example_dag = make_example_dags(example_dags_module).get('example_bash_operator')
example_dag.sync_to_db()
# Mock that there is no access to the Dag File
with patch('airflow.models.dagcode.open_maybe_zipped') as mock_open:
mock_open.side_effect = FileNotFoundError
dag_code = DagCode.get_code_by_fileloc(example_dag.fileloc)
for test_string in ['example_bash_operator', 'also_run_this', 'run_this_last']:
assert test_string in dag_code
def test_db_code_updated_on_dag_file_change(self):
"""Test if DagCode is updated in DB when DAG file is changed"""
example_dag = make_example_dags(example_dags_module).get('example_bash_operator')
example_dag.sync_to_db()
with create_session() as session:
result = session.query(DagCode).filter(DagCode.fileloc == example_dag.fileloc).one()
assert result.fileloc == example_dag.fileloc
assert result.source_code is not None
with patch('airflow.models.dagcode.os.path.getmtime') as mock_mtime:
mock_mtime.return_value = (result.last_updated + timedelta(seconds=1)).timestamp()
with patch('airflow.models.dagcode.DagCode._get_code_from_file') as mock_code:
mock_code.return_value = "# dummy code"
example_dag.sync_to_db()
with create_session() as session:
new_result = session.query(DagCode).filter(DagCode.fileloc == example_dag.fileloc).one()
assert new_result.fileloc == example_dag.fileloc
assert new_result.source_code == "# dummy code"
assert new_result.last_updated > result.last_updated
|
[
"noreply@github.com"
] |
mans2singh.noreply@github.com
|
2f3246645ae2b648b8a03a0fbdc252ec71da5335
|
119646d6e1f13582c577fd7b87c9654839a0b806
|
/hubspot/cms/domains/api/__init__.py
|
b4e60c2edbcb4bf654f47d3f798df161efd090c9
|
[] |
permissive
|
HubSpot/hubspot-api-python
|
446daaceeb3a6ce27edcd0414603c6d4bc07e327
|
d51a64c413461c0b82d8a41743e752d878747ca1
|
refs/heads/master
| 2023-08-31T09:52:56.583803
| 2023-08-07T11:00:27
| 2023-08-07T11:00:27
| 248,865,684
| 227
| 98
|
Apache-2.0
| 2023-09-14T15:25:19
| 2020-03-20T22:41:24
|
Python
|
UTF-8
|
Python
| false
| false
| 146
|
py
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.cms.domains.api.domains_api import DomainsApi
|
[
"atanasiuk@hubspot.com"
] |
atanasiuk@hubspot.com
|
d09be6ac32398bbd0397467b2ebf8504d775e116
|
356740062993a5967717098a7a3ee78ac6c6cf3f
|
/chapter15/projects/brute_force_pdf/pdf_password_breaker.py
|
814c051fc597e336a223e62a06f1d762f59c9ba6
|
[] |
no_license
|
xerifeazeitona/autbor
|
79588302f14c0c09b1f9f57fcb973e656ee1da5c
|
c37ccbfa87c1ac260e728a3a91a8f2be97978f04
|
refs/heads/main
| 2023-04-03T18:01:34.588984
| 2021-04-07T17:59:26
| 2021-04-07T17:59:26
| 348,749,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,528
|
py
|
"""
Brute-Force PDF Password Breaker
Say you have an encrypted PDF that you have forgotten the password to,
but you remember it was a single English word. Trying to guess your
forgotten password is quite a boring task.
Instead you can write a program that will decrypt the PDF by trying
every possible English word until it finds one that works.
The text file dictionary.txt contains over 44,000 English words. Using
the file-reading skills you learned in Chapter 9, create a list of word
strings by reading this file.
Then loop over each word in this list, passing it to the decrypt()
method.
If this method returns the integer 0, the password was wrong and your
program should continue to the next password.
If decrypt() returns 1, then your program should break out of the loop
and print the hacked password.
You should try both the uppercase and lowercase form of each word.
"""
import sys
import PyPDF2
# Open PDF file
pdf_reader = PyPDF2.PdfFileReader(open('encrypted.pdf', 'rb'))
# Read dictionary words from text file
with open('dictionary.txt') as file_object:
wordlist = file_object.read().split('\n')
# try to brute-force the password
total_words = len(wordlist)
for i, word in enumerate(wordlist):
print(f'\r{i} / {total_words}', end='')
if pdf_reader.decrypt(word) == 1:
print(f'\nPassword found: "{word}"')
sys.exit()
if pdf_reader.decrypt(word.lower()) == 1:
print(f'\nPassword found: "{word.lower()}"')
sys.exit()
print('\nCould not find a valid password.')
|
[
"juliano.amaral@gmail.com"
] |
juliano.amaral@gmail.com
|
b9f7406f059d850f23f3e542f6e85d3ad59ee508
|
cd555725b300579d44c0bd3f6fc8f6a968912dfb
|
/UF2/Practica 25/Ejercicio4/main.py
|
b5ab8a5b60b80fa92c11bf55fc828788bc6704fe
|
[] |
no_license
|
aleexnl/aws-python
|
2da5d8a416927f381618f1d6076d98d5e35b3b5e
|
03fce7744b443b2b59a02c261067ecae46ecc3d9
|
refs/heads/master
| 2022-11-24T08:58:24.686651
| 2020-04-18T15:58:32
| 2020-04-18T15:58:32
| 221,772,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
from module import functions as fc # Import the module that contains the functions
print(fc.mcd(30, 3)) # Call the function and pass it two parameters
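# The imported module is not included in this file. Assuming "mcd" (maximo comun divisor) computes the
# greatest common divisor, module/functions.py could look roughly like this sketch:
#
# def mcd(a, b):
#     while b: # Euclid's algorithm
#         a, b = b, a % b
#     return a
#
# With that definition, fc.mcd(30, 3) prints 3.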
|
[
"alex.nieto0027@gmail.com"
] |
alex.nieto0027@gmail.com
|
ff3e197365e7f7fe3ed5a2b0db876a287e1fc0d4
|
1b9523e970edace631326d652c1ac88bbc5e4921
|
/Launchkey/Launchkey.py
|
3ba423fd3d56201b2b02de22f6b9c4a41ce5e104
|
[] |
no_license
|
pinterre/AbletonLive9_RemoteScripts
|
3d612d9620109125eb127a6119b067e3b21d74ab
|
760c2991d39b5b4288ca0c012c12f2036556a4ef
|
refs/heads/master
| 2020-12-25T03:21:37.827211
| 2014-06-04T19:59:52
| 2014-06-04T19:59:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,587
|
py
|
#Embedded file name: /Users/versonator/Jenkins/live/Projects/AppLive/Resources/MIDI Remote Scripts/Launchkey/Launchkey.py
from __future__ import with_statement
import Live
from _Framework.ControlSurface import ControlSurface
from _Framework.InputControlElement import InputControlElement, MIDI_CC_TYPE, MIDI_NOTE_TYPE
from _Framework.SliderElement import SliderElement
from _Framework.ButtonElement import ButtonElement
from _Framework.EncoderElement import EncoderElement
from _Framework.DeviceComponent import DeviceComponent
from _Framework.TransportComponent import TransportComponent
from Launchpad.ConfigurableButtonElement import ConfigurableButtonElement
from SessionNavigationComponent import SessionNavigationComponent
from TransportViewModeSelector import TransportViewModeSelector
from SpecialMixerComponent import SpecialMixerComponent
from SpecialSessionComponent import SpecialSessionComponent
from consts import *
IS_MOMENTARY = True
def make_button(cc_no, name):
button = ButtonElement(IS_MOMENTARY, MIDI_CC_TYPE, 0, cc_no)
button.name = name
return button
def make_configurable_button(cc_no, name, type = MIDI_NOTE_TYPE, channel = 0):
button = ConfigurableButtonElement(IS_MOMENTARY, type, channel, cc_no)
button.name = name
return button
def make_encoder(cc_no, name):
encoder = EncoderElement(MIDI_CC_TYPE, 0, cc_no, Live.MidiMap.MapMode.absolute)
encoder.set_feedback_delay(-1)
encoder.name = name
return encoder
def make_slider(cc_no, name):
slider = SliderElement(MIDI_CC_TYPE, 0, cc_no)
slider.set_feedback_delay(-1)
slider.name = name
return slider
class LaunchkeyControlFactory(object):
def create_next_track_button(self):
return make_button(103, 'Next_Track_Button')
def create_prev_track_button(self):
return make_button(102, 'Prev_Track_Button')
def create_scene_launch_button(self):
return make_configurable_button(104, 'Scene_Launch_Button')
def create_scene_stop_button(self):
return make_configurable_button(120, 'Scene_Stop_Button')
def create_clip_launch_button(self, index):
return make_configurable_button(96 + index, 'Clip_Launch_%d' % index)
def create_clip_stop_button(self, index):
return make_configurable_button(112 + index, 'Clip_Stop_%d' % index)
class Launchkey(ControlSurface):
""" Script for Novation's Launchkey 25/49/61 keyboards """
def __init__(self, c_instance, control_factory = LaunchkeyControlFactory(), identity_response = SIZE_RESPONSE):
ControlSurface.__init__(self, c_instance)
self._control_factory = control_factory
self._identity_response = identity_response
with self.component_guard():
self.set_pad_translations(PAD_TRANSLATIONS)
self._device_selection_follows_track_selection = True
self._suggested_input_port = 'Launchkey InControl'
self._suggested_output_port = 'Launchkey InControl'
self._has_sliders = True
self._current_midi_map = None
self._master_slider = make_slider(7, 'Master_Volume_Control')
self._modes_buttons = []
for index in range(3):
button = ButtonElement(IS_MOMENTARY, MIDI_NOTE_TYPE, 0, 13 + index)
self._modes_buttons.append(button)
self._modes_buttons[-1].add_value_listener(self._dummy_listener)
self._setup_mixer()
self._setup_session()
self._setup_transport()
self._setup_device()
self._setup_navigation()
for component in self.components:
component.set_enabled(False)
def refresh_state(self):
ControlSurface.refresh_state(self)
self.schedule_message(2, self._send_midi, LIVE_MODE_ON)
self.schedule_message(3, self._send_midi, SIZE_QUERY)
def handle_sysex(self, midi_bytes):
if midi_bytes[0:11] == self._identity_response:
self._has_sliders = midi_bytes[11] != 48
self._send_midi(LED_FLASHING_ON)
self._update_mixer_offset()
for control in self.controls:
if isinstance(control, InputControlElement):
control.clear_send_cache()
for component in self.components:
component.set_enabled(True)
if self._has_sliders:
self._mixer.master_strip().set_volume_control(self._master_slider)
self._mixer.update()
else:
self._mixer.master_strip().set_volume_control(None)
for index in range(len(self._sliders)):
self._mixer.channel_strip(index).set_volume_control(None)
self._mixer.channel_strip(index).set_mute_button(None)
slider = self._sliders[index]
slider.release_parameter()
self._mixer.selected_strip().set_volume_control(self._master_slider)
self.request_rebuild_midi_map()
def disconnect(self):
ControlSurface.disconnect(self)
for button in self._modes_buttons:
if button.value_has_listener(self._dummy_listener):
button.remove_value_listener(self._dummy_listener)
self._modes_buttons = None
self._encoders = None
self._sliders = None
self._strip_buttons = None
self._master_slider = None
self._current_midi_map = None
self._transport_view_modes = None
self._send_midi(LED_FLASHING_OFF)
self._send_midi(LIVE_MODE_OFF)
def build_midi_map(self, midi_map_handle):
self._current_midi_map = midi_map_handle
ControlSurface.build_midi_map(self, midi_map_handle)
def _setup_mixer(self):
mute_solo_flip_button = make_button(59, 'Master_Button')
self._mixer = SpecialMixerComponent(8)
self._mixer.name = 'Mixer'
self._mixer.selected_strip().name = 'Selected_Channel_Strip'
self._mixer.master_strip().name = 'Master_Channel_Strip'
self._mixer.master_strip().set_volume_control(self._master_slider)
self._sliders = []
self._strip_buttons = []
for index in range(8):
strip = self._mixer.channel_strip(index)
strip.name = 'Channel_Strip_' + str(index)
strip.set_invert_mute_feedback(True)
self._sliders.append(make_slider(41 + index, 'Volume_Control_%d' % index))
strip.set_volume_control(self._sliders[-1])
self._strip_buttons.append(make_button(51 + index, 'Mute_Button_%d' % index))
self._mixer.set_strip_mute_solo_buttons(tuple(self._strip_buttons), mute_solo_flip_button)
def _setup_session(self):
scene_launch_button = self._control_factory.create_scene_launch_button()
scene_stop_button = self._control_factory.create_scene_stop_button()
self._session = SpecialSessionComponent(8, 0)
self._session.name = 'Session_Control'
self._session.selected_scene().name = 'Selected_Scene'
self._session.selected_scene().set_launch_button(scene_launch_button)
self._session.selected_scene().set_triggered_value(GREEN_BLINK)
self._session.set_stop_all_clips_button(scene_stop_button)
scene_stop_button.set_on_off_values(AMBER_FULL, LED_OFF)
self._session.set_mixer(self._mixer)
self._session.set_track_banking_increment(8)
self._session.set_stop_track_clip_value(GREEN_BLINK)
clip_launch_buttons = []
clip_stop_buttons = []
for index in range(8):
clip_launch_buttons.append(self._control_factory.create_clip_launch_button(index))
clip_stop_buttons.append(self._control_factory.create_clip_stop_button(index))
clip_slot = self._session.selected_scene().clip_slot(index)
clip_slot.set_triggered_to_play_value(GREEN_BLINK)
clip_slot.set_triggered_to_record_value(RED_BLINK)
clip_slot.set_stopped_value(AMBER_FULL)
clip_slot.set_started_value(GREEN_FULL)
clip_slot.set_recording_value(RED_FULL)
clip_slot.set_launch_button(clip_launch_buttons[-1])
clip_slot.name = 'Selected_Clip_Slot_' + str(index)
self._session.set_stop_track_clip_buttons(tuple(clip_stop_buttons))
def _setup_transport(self):
rwd_button = make_button(112, 'Rwd_Button')
ffwd_button = make_button(113, 'FFwd_Button')
stop_button = make_button(114, 'Stop_Button')
play_button = make_button(115, 'Play_Button')
loop_button = make_button(116, 'Loop_Button')
rec_button = make_button(117, 'Record_Button')
transport = TransportComponent()
transport.name = 'Transport'
transport.set_stop_button(stop_button)
transport.set_play_button(play_button)
transport.set_record_button(rec_button)
transport.set_loop_button(loop_button)
self._transport_view_modes = TransportViewModeSelector(transport, self._session, ffwd_button, rwd_button)
self._transport_view_modes.name = 'Transport_View_Modes'
def _setup_device(self):
encoders = [ make_encoder(21 + index, 'Device_Control_%d' % index) for index in xrange(8) ]
self._encoders = tuple(encoders)
device = DeviceComponent()
device.name = 'Device_Component'
self.set_device_component(device)
device.set_parameter_controls(self._encoders)
def _setup_navigation(self):
self._next_track_button = self._control_factory.create_next_track_button()
self._prev_track_button = self._control_factory.create_prev_track_button()
self._session_navigation = SessionNavigationComponent(name='Session_Navigation')
self._session_navigation.set_next_track_button(self._next_track_button)
self._session_navigation.set_prev_track_button(self._prev_track_button)
def _dummy_listener(self, value):
pass
def _on_selected_track_changed(self):
ControlSurface._on_selected_track_changed(self)
self._update_mixer_offset()
def _update_mixer_offset(self):
all_tracks = self._session.tracks_to_use()
selected_track = self.song().view.selected_track
num_strips = self._session.width()
if selected_track in all_tracks:
track_index = list(all_tracks).index(selected_track)
new_offset = track_index - track_index % num_strips
self._session.set_offsets(new_offset, self._session.scene_offset())
|
[
"julien@julienbayle.net"
] |
julien@julienbayle.net
|
a7bf7e1b684f8cbdcd574f13eed72023cf50bd3d
|
1bfad01139237049eded6c42981ee9b4c09bb6de
|
/RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/statistics/view/formulacatalog/formulacatalog.py
|
b878626afcde61247e92d719fe488330d5646617
|
[
"MIT"
] |
permissive
|
kakkotetsu/IxNetwork
|
3a395c2b4de1488994a0cfe51bca36d21e4368a5
|
f9fb614b51bb8988af035967991ad36702933274
|
refs/heads/master
| 2020-04-22T09:46:37.408010
| 2019-02-07T18:12:20
| 2019-02-07T18:12:20
| 170,284,084
| 0
| 0
|
MIT
| 2019-02-12T08:51:02
| 2019-02-12T08:51:01
| null |
UTF-8
|
Python
| false
| false
| 2,267
|
py
|
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FormulaCatalog(Base):
"""The FormulaCatalog class encapsulates a required formulaCatalog node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the FormulaCatalog property from a parent instance.
The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
"""
_SDM_NAME = 'formulaCatalog'
def __init__(self, parent):
super(FormulaCatalog, self).__init__(parent)
@property
def FormulaColumn(self):
"""An instance of the FormulaColumn class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.formulacatalog.formulacolumn.formulacolumn.FormulaColumn)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.statistics.view.formulacatalog.formulacolumn.formulacolumn import FormulaColumn
return FormulaColumn(self)
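# Hypothetical usage sketch (the parent object name is assumed, not defined here);
# per the docstrings above, the node is reached through properties of its parent:
#   view = ...                                   # a statistics View instance
#   formula_catalog = view.FormulaCatalog        # required formulaCatalog node
#   formula_column = formula_catalog.FormulaColumn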
|
[
"hubert.gee@keysight.com"
] |
hubert.gee@keysight.com
|
68154eff9922df9e73a7cc26900053f1c09742e8
|
b05761d771bb5a85d39d370c649567c1ff3eb089
|
/venv/lib/python3.10/site-packages/secretstorage/collection.py
|
cb7de6508e71b59c58873630fbbd3cc64da134b5
|
[] |
no_license
|
JawshyJ/Coding_Practice
|
88c49cab955eab04609ec1003b6b8c20f103fc06
|
eb6b229d41aa49b1545af2120e6bee8e982adb41
|
refs/heads/master
| 2023-02-19T10:18:04.818542
| 2023-02-06T21:22:58
| 2023-02-06T21:22:58
| 247,788,631
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 96
|
py
|
/home/runner/.cache/pip/pool/94/7c/12/3a414ee6c46ca60714068048f191b66aed9b71448ee97eb892c55d9ec4
|
[
"37465112+JawshyJ@users.noreply.github.com"
] |
37465112+JawshyJ@users.noreply.github.com
|
3629c99cabd1fa8d37fb6433b4c595dcf55e3483
|
6c816f19d7f4a3d89abbb00eeaf43dd818ecc34f
|
/venv/Scripts/easy_install-3.6-script.py
|
a95d3aabec1724ba66e9a38d3cbc071eb631e331
|
[] |
no_license
|
reo-dev/bolt
|
29ee6aa7cfc96bd50fa7a7dae07fbaafc2125e54
|
d1a7859dd1ebe2f5b0e6e295047b620f5afdb92e
|
refs/heads/master
| 2023-07-13T04:05:57.856278
| 2021-08-27T09:07:03
| 2021-08-27T09:07:03
| 382,195,547
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
#!C:\Users\kwongibin.DESKTOP-KIC4V0D\PycharmProjects\bolttnut\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.6'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.6')()
)
|
[
"75593016+reo-dev@users.noreply.github.com"
] |
75593016+reo-dev@users.noreply.github.com
|
4e775386203f7562b4f2575de0d1b52520df4054
|
eb677df036fb922c36be3ac309a6b51137161343
|
/bin/alpha
|
fd242e28ca7f18e5ca9be924643804c1d152bc05
|
[] |
no_license
|
kmyk/dotfiles
|
f1be5f1732a22a44605faca92a003de7a40968fa
|
33fbd461135fa6bc4b954c403402d4433cc45abd
|
refs/heads/master
| 2021-07-02T03:17:12.814548
| 2020-09-20T05:56:52
| 2020-09-20T05:56:52
| 11,720,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
#!/usr/bin/env python
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--upper', action='store_true')
parser.add_argument('-l', '--lower', action='store_false', dest='upper')
parser.add_argument('-d', '--delimiter', type=str, default=' ')
parser.add_argument('-1', action='store_const', const="\n", dest='delimiter')
parser.add_argument('-0', action='store_const', const='', dest='delimiter')
parser.add_argument('-n', action='store_const', default="\n", const='')
args = parser.parse_args()
sys.stdout.write(args.delimiter.join(list("ABCDEFGHIJKLMNOPQRSTUVWXYZ" if args.upper else "abcdefghijklmnopqrstuvwxyz"))+args.n)
|
[
"kimiyuki95@gmail.com"
] |
kimiyuki95@gmail.com
|
|
77c3d84a6a26cc77989a45e2fc4ac568d1945e4d
|
1e78c5b957b31fe6c7ff9ad54694e7fa0048b46b
|
/movie_reviews/urls.py
|
25f160118eaefb656b03eb7481ebbaa1e53536e1
|
[] |
no_license
|
chloe-codes1/movie-reviews
|
c7c0baa951f5d43ccc924d2a53f458e368193e37
|
df0c5e1c0d32a97df1908489964cea427c9479d0
|
refs/heads/master
| 2021-05-26T15:45:19.743155
| 2020-04-27T04:06:49
| 2020-04-27T04:06:49
| 254,127,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 862
|
py
|
"""django_pjt2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('community/', include('community.urls')),
path('accounts/', include('accounts.urls')),
]
|
[
"juhyun.kim@lindsey.edu"
] |
juhyun.kim@lindsey.edu
|
f79b600165531bc2f99a143d492fcdd1979da970
|
d5ea367d4cb6381c4ccb79448d41ad5e46c77a46
|
/querycorrect/recovery.py
|
524f0999718a28c719f70b734cdf864014326750
|
[
"Apache-2.0"
] |
permissive
|
zouning68/QueryCorrector
|
cbe712f6e89c34b020d05ccd7a274adccd0f1ab6
|
afe3814c7dbd536089611510e82dacc56ef36413
|
refs/heads/master
| 2020-09-13T03:47:49.813394
| 2020-01-19T09:15:13
| 2020-01-19T09:15:13
| 222,647,649
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,912
|
py
|
import codecs, operator, os, time, logging, kenlm, jieba, re
from pypinyin import lazy_pinyin
import numpy as np
#***********************************************************************************************************************
PUNCTUATION_LIST = ".。,,,、??::;;{}[]【】“‘’”《》/!!%……()<>@#$~^¥%&*\"\'=+-_——「」"
re_ch = re.compile(u"([\u4e00-\u9fa5])",re.S)
re_en = re.compile(u"([a-zA-Z]*)",re.S)
def is_alphabet_string(string): # check whether the string consists solely of lower-case English letters
for c in string:
if c < 'a' or c > 'z':
return False
return True
def Q2B(uchar): # convert a full-width character to half-width
inside_code = ord(uchar)
if inside_code == 0x3000:
inside_code = 0x0020
else:
inside_code -= 0xfee0
    if inside_code < 0x0020 or inside_code > 0x7e: # not a half-width character after conversion, return the original character
return uchar
return chr(inside_code)
def stringQ2B(ustring): # convert all full-width characters in a string to half-width
return "".join([Q2B(uchar) for uchar in ustring])
def uniform(ustring): # normalize the string: full-width to half-width, upper case to lower case
return stringQ2B(ustring).lower()
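# Illustrative examples (assumed inputs) for the normalization helpers above:
#   Q2B('A') -> 'A'                 (full-width Latin letter to half-width)
#   uniform('ABC,123') -> 'abc,123'  (full-width to half-width, then lower-cased)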
def edit_distance_word(word, char_set): # all edits that are one edit away from 'word'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1]
replaces = [L + c + R[1:] for L, R in splits if R for c in char_set]
return set(transposes + replaces)
def is_chinese(uchar): # check whether a unicode character is a Chinese character
if '\u4e00' <= uchar <= '\u9fa5':
return True
else:
return False
def is_chinese_string(string): # check whether the string consists entirely of Chinese characters
for c in string:
if not is_chinese(c):
return False
return True
class Config:
def __init__(self):
pwd_path = os.path.abspath(os.path.dirname(__file__))
        self.word_freq_path = os.path.join(pwd_path, 'data/word_freq.txt') # general tokenization dictionary, format: word frequency
        self.common_char_path = os.path.join(pwd_path, 'data/common_char_set.txt') # set of common Chinese characters
        self.same_pinyin_path = os.path.join(pwd_path, 'data/same_pinyin.txt') # homophone characters
        self.same_stroke_path = os.path.join(pwd_path, 'data/same_stroke.txt') # visually similar (same-stroke) characters
        self.language_model_path = os.path.join(pwd_path, 'data/kenlm/people_chars_lm.klm') # language model path
        self.custom_confusion_path = os.path.join(pwd_path, 'data/custom_confusion.txt') # user-defined confusion set, format: variant original original-frequency (optional)
        self.custom_word_freq_path = os.path.join(pwd_path, 'data/custom_word_freq.txt') # user-defined tokenization dictionary, format: word frequency
        self.person_name_path = os.path.join(pwd_path, 'data/person_name.txt') # dictionary of well-known person names, format: word frequency
        self.place_name_path = os.path.join(pwd_path, 'data/place_name.txt') # dictionary of place names, format: word frequency
        self.stopwords_path = os.path.join(pwd_path, 'data/stopwords.txt') # stop words
config = Config()
class Tokenizer(object):
def __init__(self, dict_path='', custom_word_freq_dict=None, custom_confusion_dict=None):
self.model = jieba
self.model.default_logger.setLevel(logging.ERROR)
        # initialize the main dictionary
if os.path.exists(dict_path):
self.model.set_dictionary(dict_path)
        # load the user-defined dictionary
if custom_word_freq_dict:
for w, f in custom_word_freq_dict.items():
self.model.add_word(w, freq=f)
        # load the confusion-set dictionary
if custom_confusion_dict:
for k, word in custom_confusion_dict.items():
                # add to the tokenizer's custom dictionary
self.model.add_word(k)
self.model.add_word(word)
def tokenize(self, sentence):
        seg_res, cur_index = [], 0
for word in re_en.split(sentence):
word = word.strip()
if word in ['', ' ']: continue
            if re_en.fullmatch(word): # English token
seg_res.append((word, cur_index, cur_index+len(word)))
            else: # non-English token
model_seg = list(self.model.tokenize(word))
seg_res.extend([(e[0], e[1]+cur_index, e[2]+cur_index) for e in model_seg])
cur_index = seg_res[-1][2]
return seg_res
class ErrorType(object): # error_type = {"confusion": 1, "word": 2, "char": 3}
confusion, word, char = 'confusion', 'word', 'char'
class Detector(object):
def __init__(self, language_model_path=config.language_model_path,
word_freq_path=config.word_freq_path,
custom_word_freq_path=config.custom_word_freq_path,
custom_confusion_path=config.custom_confusion_path,
person_name_path=config.person_name_path,
place_name_path=config.place_name_path,
stopwords_path=config.stopwords_path):
self.name = 'detector'
self.language_model_path = language_model_path
self.word_freq_path = word_freq_path
self.custom_word_freq_path = custom_word_freq_path
self.custom_confusion_path = custom_confusion_path
self.person_name_path = person_name_path
self.place_name_path = place_name_path
self.stopwords_path = stopwords_path
self.is_char_error_detect = True
self.is_word_error_detect = True
self.initialized_detector = False
self.enable_rnnlm = False
#initialize detector dict sets
t1 = time.time()
self.lm = kenlm.Model(self.language_model_path)
logging.debug('Loaded language model: %s, spend: %s s' % (self.language_model_path, str(time.time() - t1)))
        # word -> frequency dict
t2 = time.time()
self.word_freq = self.load_word_freq_dict(self.word_freq_path)
t3 = time.time()
logging.debug('Loaded word freq file: %s, size: %d, spend: %s s' % (self.word_freq_path, len(self.word_freq), str(t3 - t2)))
        # custom confusion set
self.custom_confusion = self._get_custom_confusion_dict(self.custom_confusion_path)
t4 = time.time()
logging.debug('Loaded confusion file: %s, size: %d, spend: %s s' % (self.custom_confusion_path, len(self.custom_confusion), str(t4 - t3)))
        # custom tokenization dictionary
self.custom_word_freq = self.load_word_freq_dict(self.custom_word_freq_path)
self.person_names = self.load_word_freq_dict(self.person_name_path)
self.place_names = self.load_word_freq_dict(self.place_name_path)
self.stopwords = self.load_word_freq_dict(self.stopwords_path)
        # merge the tokenization dictionary with the custom dictionaries
self.custom_word_freq.update(self.person_names)
self.custom_word_freq.update(self.place_names)
self.custom_word_freq.update(self.stopwords)
self.word_freq.update(self.custom_word_freq)
t5 = time.time()
        logging.debug('Loaded custom word file: %s, size: %d, spend: %s s' % (self.custom_word_freq_path, len(self.custom_word_freq), str(t5 - t4)))
self.tokenizer = Tokenizer(dict_path=self.word_freq_path, custom_word_freq_dict=self.custom_word_freq, custom_confusion_dict=self.custom_confusion)
self.initialized_detector = True
@staticmethod
    def load_word_freq_dict(path): # load a word-frequency dictionary for tokenization
word_freq = {}
with codecs.open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
info = line.split()
if len(info) < 1:
continue
word = info[0]
                # word frequency, defaults to 1
freq = int(info[1]) if len(info) > 1 else 1
word_freq[word] = freq
return word_freq
    def _get_custom_confusion_dict(self, path): # load the custom confusion set. dict, {variant: origin}, eg: {"交通先行": "交通限行"}
confusion = {}
with codecs.open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
info = line.split()
if len(info) < 2:
continue
variant = info[0]
origin = info[1]
freq = int(info[2]) if len(info) > 2 else 1
self.word_freq[origin] = freq
confusion[variant] = origin
return confusion
def set_language_model_path(self, path):
self.lm = kenlm.Model(path)
logging.debug('Loaded language model: %s' % path)
def set_custom_confusion_dict(self, path):
custom_confusion = self._get_custom_confusion_dict(path)
self.custom_confusion.update(custom_confusion)
logging.debug('Loaded confusion path: %s, size: %d' % (path, len(custom_confusion)))
def set_custom_word(self, path):
word_freqs = self.load_word_freq_dict(path)
        # merge the dictionaries
        self.custom_word_freq.update(word_freqs)
        # merge the tokenization dictionary with the custom dictionaries
self.word_freq.update(self.custom_word_freq)
self.tokenizer = Tokenizer(dict_path=self.word_freq_path, custom_word_freq_dict=self.custom_word_freq, custom_confusion_dict=self.custom_confusion)
for k, v in word_freqs.items():
self.set_word_frequency(k, v)
logging.debug('Loaded custom word path: %s, size: %d' % (path, len(word_freqs)))
def enable_char_error(self, enable=True): # is open char error detect
self.is_char_error_detect = enable
def enable_word_error(self, enable=True): # is open word error detect
self.is_word_error_detect = enable
    def ngram_score(self, chars): # n-gram score; chars: list, split by word or by character
        return self.lm.score(' '.join(chars), bos=False, eos=False)
    def ppl_score(self, words): # language model perplexity; the lower, the more fluent the sentence. words: list, split by word or by character
        return self.lm.perplexity(' '.join(words))
    def word_frequency(self, word): # frequency of the word in the corpus
return self.word_freq.get(word, 0)
    def set_word_frequency(self, word, num): # update the word's frequency in the corpus
self.word_freq[word] = num
return self.word_freq
@staticmethod
def _check_contain_error(maybe_err, maybe_errors):
        # check whether the error set (maybe_errors) already contains this error span (maybe_err). maybe_err: [error_word, begin_pos, end_pos, error_type]
error_word_idx = 0
begin_idx = 1
end_idx = 2
for err in maybe_errors:
if maybe_err[error_word_idx] in err[error_word_idx] and maybe_err[begin_idx] >= err[begin_idx] and \
maybe_err[end_idx] <= err[end_idx]:
return True
return False
    def _add_maybe_error_item(self, maybe_err, maybe_errors): # add a new suspected error
if maybe_err not in maybe_errors and not self._check_contain_error(maybe_err, maybe_errors):
maybe_errors.append(maybe_err)
@staticmethod
def _get_maybe_error_index(scores, ratio=0.6745, threshold=1.4):
"""
        Locate suspicious (possibly wrong) characters via the median absolute deviation (MAD).
        :param scores: np.array
        :param threshold: the smaller the threshold, the more suspicious characters are returned
        :return: indices of all suspicious characters: list
"""
result = []
scores = np.array(scores)
if len(scores.shape) == 1:
scores = scores[:, None]
median = np.median(scores, axis=0) # get median of all scores
margin_median = np.abs(scores - median).flatten() # deviation from the median
        # median absolute deviation
med_abs_deviation = np.median(margin_median)
if med_abs_deviation == 0:
return result
y_score = ratio * margin_median / med_abs_deviation
        # flatten
scores = scores.flatten()
maybe_error_indices = np.where((y_score > threshold) & (scores < median))
        # indices of all suspicious characters
result = list(maybe_error_indices[0])
return result
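    # Worked illustration (assumed scores): for scores [-2.0, -2.1, -9.0, -2.2]
    # the median is -2.15, the absolute deviations are [0.15, 0.05, 6.85, 0.05],
    # and their median (MAD) is 0.10; y = 0.6745 * deviation / MAD gives roughly
    # [1.0, 0.3, 46.2, 0.3], so only index 2 exceeds the 1.4 threshold while also
    # scoring below the median, and is flagged as a likely error position.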
@staticmethod
def _get_maybe_error_index_by_rnnlm(scores, n=3):
"""
        Locate suspicious characters: scores within three standard deviations of the mean are treated as normal points.
        :param scores: list, float
        :param threshold: the smaller the threshold, the more suspicious characters are returned
        :return: indices of all suspicious characters: list
"""
std = np.std(scores, ddof=1)
mean = np.mean(scores)
down_limit = mean - n * std
upper_limit = mean + n * std
maybe_error_indices = np.where((scores > upper_limit) | (scores < down_limit))
        # indices of all suspicious characters
result = list(maybe_error_indices[0])
return result
@staticmethod
def is_filter_token(token):
result = False
# pass blank
if not token.strip():
result = True
# pass punctuation
if token in PUNCTUATION_LIST:
result = True
# pass num
if token.isdigit():
result = True
# pass alpha
#if is_alphabet_string(token.lower()):
# result = True
return result
def detect(self, sentence):
"""
        Detect suspected errors in the sentence, including [word, position, error type].
:param sentence:
:return: list[list], [error_word, begin_pos, end_pos, error_type]
"""
maybe_errors = []
if not sentence.strip():
return maybe_errors
        # text normalization
        sentence = uniform(sentence)
        # tokenization
        tokens = self.tokenizer.tokenize(sentence)
        # print(tokens)
        # add entries from the custom confusion set to the suspected-error list
for confuse in self.custom_confusion:
idx = sentence.find(confuse)
if idx > -1:
maybe_err = [confuse, idx, idx + len(confuse), ErrorType.confusion]
self._add_maybe_error_item(maybe_err, maybe_errors)
if self.is_word_error_detect:
            # add out-of-vocabulary words to the suspected-error list
for word, begin_idx, end_idx in tokens:
# pass filter word
if self.is_filter_token(word):
continue
# pass in dict
if word in self.word_freq:
continue
maybe_err = [word, begin_idx, end_idx, ErrorType.word]
self._add_maybe_error_item(maybe_err, maybe_errors)
if self.is_char_error_detect:
            # detect suspicious characters with the language model
if self.enable_rnnlm:
scores = self.char_scores(sentence)
                # collect information on the suspicious characters
for i in self._get_maybe_error_index_by_rnnlm(scores):
token = sentence[i]
# pass filter word
if self.is_filter_token(token):
continue
maybe_err = [token, i, i + 1, ErrorType.char] # token, begin_idx, end_idx, error_type
self._add_maybe_error_item(maybe_err, maybe_errors)
else:
try:
ngram_avg_scores = []
for n in [2, 3]:
scores = []
for i in range(len(sentence) - n + 1):
word = sentence[i:i + n]
score = self.ngram_score(list(word))
scores.append(score)
if not scores:
continue
                        # pad the scores so the moving window covers the whole sentence
for _ in range(n - 1):
scores.insert(0, scores[0])
scores.append(scores[-1])
avg_scores = [sum(scores[i:i + n]) / len(scores[i:i + n]) for i in range(len(sentence))]
ngram_avg_scores.append(avg_scores)
                    # average the n-gram scores of the different orders
                    sent_scores = list(np.average(np.array(ngram_avg_scores), axis=0))
                    # collect information on the suspicious characters
for i in self._get_maybe_error_index(sent_scores):
token = sentence[i]
# pass filter word
if self.is_filter_token(token):
continue
maybe_err = [token, i, i + 1, ErrorType.char] # token, begin_idx, end_idx, error_type
self._add_maybe_error_item(maybe_err, maybe_errors)
except IndexError as ie:
logging.warning("index error, sentence:" + sentence + str(ie))
except Exception as e:
logging.warning("detect error, sentence:" + sentence + str(e))
return sorted(maybe_errors, key=lambda k: k[1], reverse=False)
#***********************************************************************************************************************
def load_char_set(path):
words = set()
if not os.path.exists(path):
logging.warning("file not exists:" + path)
return words
with codecs.open(path, 'r', encoding='utf-8') as f:
for w in f:
words.add(w.strip())
return words
def load_same_pinyin(path, sep='\t'): # load the homophone-character table
result = dict()
if not os.path.exists(path):
logging.warning("file not exists:" + path)
return result
with codecs.open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
parts = line.split(sep)
if parts and len(parts) > 2:
key_char = parts[0]
same_pron_same_tone = set(list(parts[1]))
same_pron_diff_tone = set(list(parts[2]))
value = same_pron_same_tone.union(same_pron_diff_tone)
if len(key_char) > 1 or not value:
continue
result[key_char] = value
return result
def load_same_stroke(path, sep='\t'): # load the visually-similar (same-stroke) character table
result = dict()
if not os.path.exists(path):
logging.warning("file not exists:" + path)
return result
with codecs.open(path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith('#'):
continue
parts = line.split(sep)
if parts and len(parts) > 1:
for i, c in enumerate(parts):
result[c] = set(list(parts[:i] + parts[i + 1:]))
return result
class Corrector(Detector):
def __init__(self, common_char_path=config.common_char_path,
same_pinyin_path=config.same_pinyin_path,
same_stroke_path=config.same_stroke_path,
language_model_path=config.language_model_path,
word_freq_path=config.word_freq_path,
custom_word_freq_path=config.custom_word_freq_path,
custom_confusion_path=config.custom_confusion_path,
person_name_path=config.person_name_path,
place_name_path=config.place_name_path,
stopwords_path=config.stopwords_path):
super(Corrector, self).__init__(language_model_path=language_model_path,
word_freq_path=word_freq_path,
custom_word_freq_path=custom_word_freq_path,
custom_confusion_path=custom_confusion_path,
person_name_path=person_name_path,
place_name_path=place_name_path,
stopwords_path=stopwords_path)
self.name = 'corrector'
self.common_char_path = common_char_path
self.same_pinyin_text_path = same_pinyin_path
self.same_stroke_text_path = same_stroke_path
self.initialized_corrector = False
# initialize corrector dict sets
t1 = time.time()
self.cn_char_set = load_char_set(self.common_char_path) # chinese common char dict
self.same_pinyin = load_same_pinyin(self.same_pinyin_text_path) # same pinyin
self.same_stroke = load_same_stroke(self.same_stroke_text_path) # same stroke
logging.debug("Loaded same pinyin file: %s, same stroke file: %s, spend: %.3f s." % (
self.same_pinyin_text_path, self.same_stroke_text_path, time.time() - t1))
self.initialized_corrector = True
    def get_same_pinyin(self, char): # homophone characters for the given character
        return self.same_pinyin.get(char, set())
    def get_same_stroke(self, char): # visually similar characters for the given character
        return self.same_stroke.get(char, set())
    def known(self, words): # the subset of words that appear in the frequency dictionary
return set(word for word in words if word in self.word_freq)
def _confusion_char_set(self, c):
return self.get_same_pinyin(c).union(self.get_same_stroke(c))
def _confusion_word_set(self, word):
confusion_word_set = set()
candidate_words = list(self.known(edit_distance_word(word, self.cn_char_set)))
for candidate_word in candidate_words:
if lazy_pinyin(candidate_word) == lazy_pinyin(word):
# same pinyin
confusion_word_set.add(candidate_word)
return confusion_word_set
def _confusion_custom_set(self, word):
confusion_word_set = set()
if word in self.custom_confusion:
confusion_word_set = {self.custom_confusion[word]}
return confusion_word_set
    def generate_items(self, word, fraction=1): # generate candidate corrections for a word
candidates_1_order = []
candidates_2_order = []
candidates_3_order = []
# same pinyin word
candidates_1_order.extend(self._confusion_word_set(word))
# custom confusion word
candidates_1_order.extend(self._confusion_custom_set(word))
# same pinyin char
if len(word) == 1:
# same one char pinyin
confusion = [i for i in self._confusion_char_set(word[0]) if i]
candidates_1_order.extend(confusion)
if len(word) == 2:
# same first char pinyin
confusion = [i + word[1:] for i in self._confusion_char_set(word[0]) if i]
candidates_2_order.extend(confusion)
# same last char pinyin
confusion = [word[:-1] + i for i in self._confusion_char_set(word[-1]) if i]
candidates_2_order.extend(confusion)
if len(word) > 2:
# same mid char pinyin
confusion = [word[0] + i + word[2:] for i in self._confusion_char_set(word[1])]
candidates_3_order.extend(confusion)
# same first word pinyin
confusion_word = [i + word[-1] for i in self._confusion_word_set(word[:-1])]
candidates_3_order.extend(confusion_word)
# same last word pinyin
confusion_word = [word[0] + i for i in self._confusion_word_set(word[1:])]
candidates_3_order.extend(confusion_word)
# add all confusion word list
confusion_word_set = set(candidates_1_order + candidates_2_order + candidates_3_order)
confusion_word_list = [item for item in confusion_word_set if is_chinese_string(item)]
confusion_sorted = sorted(confusion_word_list, key=lambda k: self.word_frequency(k), reverse=True)
return confusion_sorted[:len(confusion_word_list) // fraction + 1]
    def lm_correct_item(self, item, maybe_right_items, before_sent, after_sent): # pick the best correction with the language model
if item not in maybe_right_items:
maybe_right_items.append(item)
corrected_item = min(maybe_right_items, key=lambda k: self.ppl_score(list(before_sent + k + after_sent)))
return corrected_item
def correct(self, sentence):
"""
        Correct errors in a sentence.
        :param sentence: the sentence text
        :return: the corrected sentence, list(wrong, right, begin_idx, end_idx)
"""
detail = []
        # split a long sentence into short clauses
# sentences = re.split(r";|,|。|\?\s|;\s|,\s", sentence)
maybe_errors = self.detect(sentence)
        # trick: like a translation model, process the errors in reverse order
maybe_errors = sorted(maybe_errors, key=operator.itemgetter(2), reverse=True)
for item, begin_idx, end_idx, err_type in maybe_errors:
            # correct the errors one by one
before_sent = sentence[:begin_idx]
after_sent = sentence[end_idx:]
            # words listed in the confusion set: take the mapped result directly
if err_type == ErrorType.confusion:
corrected_item = self.custom_confusion[item]
else:
                # skip suspected errors that are not Chinese
if not is_chinese_string(item):
continue
                # collect all candidate corrections
maybe_right_items = self.generate_items(item)
if not maybe_right_items:
continue
corrected_item = self.lm_correct_item(item, maybe_right_items, before_sent, after_sent)
# output
if corrected_item != item:
sentence = before_sent + corrected_item + after_sent
# logging.debug('predict:' + item + '=>' + corrected_item)
detail_word = [item, corrected_item, begin_idx, end_idx]
detail.append(detail_word)
detail = sorted(detail, key=operator.itemgetter(2))
return sentence, detail
if __name__ == "__main__":
c = Corrector()
corrected_sent, detail = c.correct('黑龙江大学,学与应用数学')
print(corrected_sent, detail)
|
[
"noreply@github.com"
] |
zouning68.noreply@github.com
|
bde40d296203cf014d6fa9584b60e567c306d60d
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/rna-transcription/64f6942fedc844af848cc19de6e53748.py
|
bc316926c867570f3906523b44066297c83447b6
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
#didnt work out with replace method
#so i used the translate method with a map
#Is translate deprecated in Python3.4?
def to_rna(what):
mapper = what.maketrans('GCTA','CGAU')
what = what.translate(mapper)
return what
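#str.translate is not deprecated in Python 3; with a table built by
#str.maketrans it maps characters directly, e.g. to_rna('GCTA') -> 'CGAU'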
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
848502b51141149a40a491138906f07fd3f8693d
|
c96cd25f0a74a7feecbf363948bab51018b719f1
|
/tests/modules/jmeter/test_JMX.py
|
7dad01cad775949d32372aaf1ec674e835b7fc08
|
[
"Apache-2.0"
] |
permissive
|
YajanaRao/taurus
|
ee7468ec287a4b07ea56ca671537f363906bbe26
|
c08e5b90063bf4f7904e8ec4eb5f3c50a8e89ac0
|
refs/heads/master
| 2020-03-28T10:38:43.563353
| 2018-09-10T14:58:35
| 2018-09-10T14:58:35
| 148,129,849
| 0
| 0
|
Apache-2.0
| 2018-09-10T09:17:42
| 2018-09-10T09:17:42
| null |
UTF-8
|
Python
| false
| false
| 14,515
|
py
|
# coding=utf-8
from . import MockJMeterExecutor
from bzt.engine import Provisioning
from bzt.jmx import JMX, LoadSettingsProcessor
from tests import BZTestCase, RESOURCES_DIR
class TestLoadSettingsProcessor(BZTestCase):
def configure(self, jmx_file=None, load=None, settings=None, has_ctg=None):
executor = MockJMeterExecutor(load, settings, has_ctg)
executor.engine.config.merge({Provisioning.PROV: 'local'})
self.obj = LoadSettingsProcessor(executor)
if jmx_file:
self.jmx = JMX(jmx_file)
def get_groupset(self, testname=None):
groupset = []
for group in self.obj.tg_handler.groups(self.jmx):
# 'testname == None' means 'get all groups'
if not testname or (testname and group.element.attrib['testname'] == testname):
groupset.append(group)
return groupset
def test_keep_original(self):
self.configure(jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.TG, self.obj.tg) # because no duration
self.sniff_log(self.obj.log)
self.obj.modify(self.jmx)
msg = "No iterations/concurrency/duration found, thread group modification is skipped"
self.assertIn(msg, self.log_recorder.debug_buff.getvalue())
groupset = self.get_groupset()
groups = [group.gtype for group in groupset]
self.assertEqual(5, len(set(groups))) # no one group was modified
self.assertEqual("", self.log_recorder.warn_buff.getvalue())
res_values = {}
for group in groupset:
res_values[group.get_testname()] = {
'conc': group.get_concurrency(),
'rate': group.get_rate(),
'duration': group.get_duration(),
'iterations': group.get_iterations()}
self.assertEqual(res_values,
{'TG.01': {'conc': 2, 'duration': 3, 'iterations': 100, 'rate': None},
'CTG.02': {'conc': 3, 'duration': 100, 'iterations': None, 'rate': None},
'STG.03': {'conc': 4, 'duration': None, 'iterations': None, 'rate': None},
'UTG.04': {'conc': 1, 'duration': None, 'iterations': None, 'rate': None},
'ATG.05': {'conc': 1, 'duration': 480, 'iterations': 33, 'rate': 2}})
def test_TG_cs(self):
""" ThreadGroup: concurrency, steps """
self.configure(load={'concurrency': 76, 'steps': 5},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.TG, self.obj.tg) # because no duration
self.sniff_log(self.obj.log)
self.obj.modify(self.jmx)
msg = 'Getting of concurrency for UltimateThreadGroup not implemented'
self.assertIn(msg, self.log_recorder.warn_buff.getvalue())
msg = "Had to add 1 more threads to maintain thread group proportion"
self.assertIn(msg, self.log_recorder.warn_buff.getvalue())
msg = "Stepping ramp-up isn't supported for regular ThreadGroup"
self.assertIn(msg, self.log_recorder.warn_buff.getvalue())
res_values = {}
for group in self.get_groupset():
self.assertEqual('ThreadGroup', group.gtype)
self.assertEqual("false", group.element.find(".//*[@name='LoopController.continue_forever']").text)
self.assertEqual("-1", group.element.find(".//*[@name='LoopController.loops']").text) # no loop limit
res_values[group.get_testname()] = {'conc': group.get_concurrency(), 'on_error': group.get_on_error()}
self.assertEqual(res_values,
{'TG.01': {'conc': 14, 'on_error': 'startnextloop'},
'CTG.02': {'conc': 21, 'on_error': 'stopthread'},
'STG.03': {'conc': 28, 'on_error': 'stoptest'},
'UTG.04': {'conc': 7, 'on_error': 'stoptestnow'},
'ATG.05': {'conc': 7, 'on_error': 'continue'}})
def test_CTG_crs(self):
""" ConcurrencyThreadGroup: concurrency, ramp-up, steps """
self.configure(load={'concurrency': 71, 'ramp-up': 100, 'steps': 5},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.CTG, self.obj.tg)
self.sniff_log(self.obj.log)
self.obj.modify(self.jmx)
msg = 'Getting of concurrency for UltimateThreadGroup not implemented'
self.assertIn(msg, self.log_recorder.warn_buff.getvalue())
msg = "1 threads left undistributed due to thread group proportion"
self.assertIn(msg, self.log_recorder.warn_buff.getvalue())
res_values = {}
for group in self.get_groupset():
self.assertEqual(group.gtype, "ConcurrencyThreadGroup")
self.assertEqual("5", group.element.find(".//*[@name='Steps']").text)
self.assertEqual("100", group.element.find(".//*[@name='RampUp']").text)
self.assertEqual("S", group.element.find(".//*[@name='Unit']").text)
self.assertIn(group.element.find(".//*[@name='Hold']").text, ("", "0"))
res_values[group.get_testname()] = {'conc': group.get_concurrency(), 'on_error': group.get_on_error()}
self.assertEqual(res_values,
{'TG.01': {'conc': 13, 'on_error': 'startnextloop'},
'CTG.02': {'conc': 19, 'on_error': 'stopthread'},
'STG.03': {'conc': 26, 'on_error': 'stoptest'},
'UTG.04': {'conc': 6, 'on_error': 'stoptestnow'},
'ATG.05': {'conc': 6, 'on_error': 'continue'}})
def test_CTG_prop_rs(self):
""" ConcurrencyThreadGroup: properties in ramp-up, steps """
self.configure(load={'ramp-up': '${__P(r)}', 'steps': '${__P(s)}'},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.CTG, self.obj.tg)
self.obj.modify(self.jmx)
res_values = {}
for group in self.get_groupset():
self.assertEqual(group.gtype, "ConcurrencyThreadGroup")
self.assertEqual("${__P(s)}", group.element.find(".//*[@name='Steps']").text)
self.assertEqual("${__P(r)}", group.element.find(".//*[@name='RampUp']").text)
self.assertIn(group.element.find(".//*[@name='Hold']").text, ("", "0"))
res_values[group.get_testname()] = group.get_concurrency()
self.assertEqual(res_values, {'TG.01': 2, 'CTG.02': 3, 'STG.03': 4, 'UTG.04': 1, 'ATG.05': 1})
def test_CTG_prop_trh(self):
""" ConcurrencyThreadGroup: properties in throughput, ramp-up, hold-for """
self.configure(load={'ramp-up': '${__P(r)}', 'throughput': '${__P(t)}', 'hold-for': '${__P(h)}'},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.CTG, self.obj.tg)
self.obj.modify(self.jmx)
shaper_elements = self.jmx.get("kg\.apc\.jmeter\.timers\.VariableThroughputTimer")
self.assertEqual(1, len(shaper_elements))
shaper_collection = shaper_elements[0].find(".//collectionProp[@name='load_profile']")
coll_elements = shaper_collection.findall(".//collectionProp")
self.assertEqual(2, len(coll_elements))
strings0 = coll_elements[0].findall(".//stringProp")
self.assertEqual("1", strings0[0].text)
self.assertEqual("${__P(t)}", strings0[1].text)
self.assertEqual("${__P(r)}", strings0[2].text)
strings1 = coll_elements[1].findall(".//stringProp")
self.assertEqual("${__P(t)}", strings1[0].text)
self.assertEqual("${__P(t)}", strings1[1].text)
self.assertEqual("${__P(h)}", strings1[2].text)
def test_TG_prop_cih(self):
""" ThreadGroup: properties in concurrency, hold-for, iterations """
self.configure(load={'concurrency': '${__P(c)}', 'hold-for': '${__P(h)}', 'iterations': '${__P(i)}'},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.TG, self.obj.tg)
self.obj.modify(self.jmx)
for group in self.get_groupset():
self.assertEqual(group.gtype, "ThreadGroup")
self.assertEqual("${__P(c)}", group.element.find(".//*[@name='ThreadGroup.num_threads']").text)
self.assertEqual("${__P(i)}", group.element.find(".//*[@name='LoopController.loops']").text)
self.assertEqual("${__P(h)}", group.element.find(".//*[@name='ThreadGroup.duration']").text)
def test_TG_prop_rh(self):
""" ThreadGroup: properties in ramp-up, hold-for """
self.configure(load={'ramp-up': '${__P(r)}', 'hold-for': '${__P(h)}'},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx', has_ctg=False)
self.assertEqual(LoadSettingsProcessor.TG, self.obj.tg)
self.obj.modify(self.jmx)
for group in self.get_groupset():
self.assertEqual(group.gtype, "ThreadGroup")
self.assertEqual("-1", group.element.find(".//*[@name='LoopController.loops']").text)
self.assertEqual("${__P(r)}", group.element.find(".//*[@name='ThreadGroup.ramp_time']").text)
self.assertEqual("${__intSum(${__P(r)},${__P(h)})}",
group.element.find(".//*[@name='ThreadGroup.duration']").text)
def test_CTG_h(self):
""" ConcurrencyThreadGroup: hold-for """
self.configure(load={'hold-for': 70.5}, jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.CTG, self.obj.tg)
self.obj.modify(self.jmx)
res_values = {}
for group in self.get_groupset():
self.assertEqual("70", group.element.find(".//*[@name='Hold']").text)
res_values[group.get_testname()] = group.get_concurrency()
self.assertEqual(res_values, {'TG.01': 2, 'CTG.02': 3, 'STG.03': 4, 'UTG.04': 1, 'ATG.05': 1})
def test_TG_ci(self):
""" ThreadGroup: concurrency, iterations """
self.configure(load={'concurrency': 1, 'iterations': 7},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx')
self.assertEqual(LoadSettingsProcessor.TG, self.obj.tg)
self.obj.modify(self.jmx)
for group in self.get_groupset():
self.assertEqual(1, group.get_concurrency())
self.assertEqual("false", group.element.find(".//*[@name='ThreadGroup.scheduler']").text)
self.assertEqual("7", group.element.find(".//*[@name='LoopController.loops']").text)
def test_TG_hr(self):
""" ThreadGroup: hold-for, ramp-up, no plugin """
self.configure(load={'ramp-up': 10, 'hold-for': 20},
jmx_file=RESOURCES_DIR + 'jmeter/jmx/threadgroups.jmx',
has_ctg=False)
self.assertEqual(LoadSettingsProcessor.TG, self.obj.tg)
self.obj.modify(self.jmx)
res_values = {}
for group in self.get_groupset():
self.assertEqual("true", group.element.find(".//*[@name='ThreadGroup.scheduler']").text)
self.assertEqual("true", group.element.find(".//*[@name='ThreadGroup.scheduler']").text)
self.assertEqual(str(10 + 20), group.element.find(".//*[@name='ThreadGroup.duration']").text)
self.assertEqual("-1", group.element.find(".//*[@name='LoopController.loops']").text)
res_values[group.get_testname()] = group.get_concurrency()
self.assertEqual(res_values, {'TG.01': 2, 'CTG.02': 3, 'STG.03': 4, 'UTG.04': 1, 'ATG.05': 1})
class TestJMX(BZTestCase):
def test_jmx_unicode_checkmark(self):
obj = JMX()
res = obj._get_http_request("url", "label", "method", 0, {"param": u"✓"}, True)
prop = res.find(".//stringProp[@name='Argument.value']")
self.assertNotEqual("BINARY", prop.text)
self.assertEqual(u"✓", prop.text)
def test_variable_hostname(self):
obj = JMX()
res = obj._get_http_request("http://${hostName}:${Port}/${Path}", "label", "method", 0, {}, True)
self.assertEqual("/${Path}", res.find(".//stringProp[@name='HTTPSampler.path']").text)
self.assertEqual("${hostName}", res.find(".//stringProp[@name='HTTPSampler.domain']").text)
self.assertEqual("${Port}", res.find(".//stringProp[@name='HTTPSampler.port']").text)
def test_no_port(self):
obj = JMX()
res = obj._get_http_request("http://hostname", "label", "method", 0, {}, True)
self.assertEqual("", res.find(".//stringProp[@name='HTTPSampler.path']").text)
self.assertEqual("hostname", res.find(".//stringProp[@name='HTTPSampler.domain']").text)
self.assertEqual("", res.find(".//stringProp[@name='HTTPSampler.port']").text)
def test_regexp_subject(self):
res = JMX._get_extractor('test_name', 'baddy', 'regexp', 1, 1, 'error')
self.assertEqual("body", res.find(".//stringProp[@name='RegexExtractor.useHeaders']").text)
res = JMX._get_extractor('test_name', 'headers', 'regexp', 1, 1, 'error')
self.assertEqual("true", res.find(".//stringProp[@name='RegexExtractor.useHeaders']").text)
res = JMX._get_extractor('test_name', 'http-code', 'regexp', 1, 1, 'error')
self.assertEqual("code", res.find(".//stringProp[@name='RegexExtractor.useHeaders']").text)
self.assertIsNone(res.find(".//stringProp[@name='Sample.scope']"))
def test_int_udv(self):
res = JMX()
data = {"varname2": "1", "varname": 1, 2: 3}
res.add_user_def_vars_elements(data)
def test_source_ips_single(self):
obj = JMX()
res = obj._get_http_request("/", "label", "method", 0, {}, True,
use_random_host_ip=True, host_ips=["192.168.1.1"])
self.assertEqual("192.168.1.1", res.find(".//stringProp[@name='HTTPSampler.ipSource']").text)
def test_source_ips_multiple(self):
obj = JMX()
res = obj._get_http_request("/", "label", "method", 0, {}, True,
use_random_host_ip=True, host_ips=["192.168.1.1", "192.168.1.2"])
self.assertEqual("${__chooseRandom(192.168.1.1,192.168.1.2,randomAddr)}",
res.find(".//stringProp[@name='HTTPSampler.ipSource']").text)
|
[
"apc4@ya.ru"
] |
apc4@ya.ru
|
a3b15afa593d694915db65262098bf26c3ff1509
|
5e5610e07441b320e4b6a088c0f6cc93334bba91
|
/transportation/management/commands/services.py
|
c2a831900e54fb88826d3c55805a063f27071ef5
|
[] |
no_license
|
pixmin/poimap
|
cdea21aeb753e358166474033dc68f9eac8e929f
|
b736a2bbf40467307aa2e12012347fb44be34cf9
|
refs/heads/master
| 2020-05-02T07:42:18.298709
| 2019-03-26T16:10:53
| 2019-03-26T16:10:53
| 177,825,011
| 0
| 0
| null | 2019-03-26T16:12:00
| 2019-03-26T16:12:00
| null |
UTF-8
|
Python
| false
| false
| 2,376
|
py
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand
from django.contrib.gis.geos import GEOSGeometry
from django.utils.text import slugify
from geopy.geocoders import GoogleV3
from poimap.models import POIType
from transportation.models import Line, Route, RouteStop, Stop, Service
import csv
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--import', dest='import', action="store_true")
parser.add_argument('--export', dest='export', action="store_true")
def handle(self, *args, **options):
if options["import"] and options["export"]:
print "Only once of these arguments at a time : --import or --export"
return
if options["import"]:
Service.objects.all().delete()
with open('data/services.csv') as csvfile:
reader = csv.reader(csvfile, delimiter="|", quotechar='"')
for row in reader:
line_name, route_name, service_name, frequency_label = row
line_name = line_name.decode('utf-8')
route_name = route_name.decode('utf-8')
service_name = service_name.decode('utf-8')
line_name_slug = slugify(line_name)
route_name_slug = slugify(route_name)
route = Route.objects.get(slug=route_name_slug, line__slug=line_name_slug)
Service.objects.create(name=service_name, route=route, frequency_label=frequency_label)
elif options["export"]:
csv.register_dialect('troucelier', delimiter='|', quoting=csv.QUOTE_MINIMAL)
with open('data/export/services.csv', 'wb') as f:
writer = csv.writer(f, 'troucelier')
for line in Line.objects.all():
for route in line.routes.all():
for service in route.services.all():
writer.writerow([line.name.encode('utf-8'),
route.name.encode('utf-8'),
service.name.encode('utf-8'),
service.frequency_label.encode('utf-8')])
else:
print "Missing argument --import or --export"
return
|
[
"alban.tiberghien@gmail.com"
] |
alban.tiberghien@gmail.com
|
bb1416137eb4f898b55ecbf227c26ea57e6b504b
|
55ab64b67d8abc02907eb43a54ff6c326ded6b72
|
/scripts/addon_library/local/weight_layers/layer_scripts/WLAYER_procedural_texture.py
|
3c74ed302830f717ee1cea767d61171ba2ba06b6
|
[
"MIT"
] |
permissive
|
Tilapiatsu/blender-custom_config
|
2f03b0bb234c3b098d2830732296d199c91147d0
|
00e14fc190ebff66cf50ff911f25cf5ad3529f8f
|
refs/heads/master
| 2023-08-16T14:26:39.990840
| 2023-08-16T01:32:41
| 2023-08-16T01:32:41
| 161,249,779
| 6
| 2
|
MIT
| 2023-04-12T05:33:59
| 2018-12-10T23:25:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
from .WL_layer_functions import CustomLayerSettingsBase
import bpy
class CustomLayerSettings(CustomLayerSettingsBase):
def texture_enum_items(self, context):
items = []
for node in self.layer.layer_group.nodes:
if "TEX" in node.type:
name = node.name
items.append((node.label, name, name))
items.sort(key=lambda item: int(item[0]))
return items
def texture_enum_update(self, context):
self.node.inputs[2].default_value = int(self.texture_enum)
texture_enum: bpy.props.EnumProperty(items=texture_enum_items, update=texture_enum_update)
def on_creation(self, context):
self.texture_enum_update(context)
def draw_layer(self, context, layout):
self.draw_mix_settings(layout)
layout.separator(factor=0.5)
layout.prop(self, "texture_enum", text="", icon="TEXTURE_DATA")
layout.separator(factor=0.5)
nodes = {n.label: n for n in self.layer.layer_group.nodes}
node = nodes[self.texture_enum]
node.draw_buttons(context, layout)
layout.separator(factor=0.5)
layout = layout.column(align=True)
self.draw_node_inputs(context, layout, node)
self.draw_node_inputs(context, layout)
self.draw_adjustments_stack(context, layout)
|
[
"tilapiatsu@hotmail.fr"
] |
tilapiatsu@hotmail.fr
|
753b42ccddf3a2b6b07464b8a1830571ae36357c
|
9a72da59caf9d829cb3981a4b5e8bcde640732be
|
/releng_tool/engine/post.py
|
91998ef0745137254bcc8301649d44f51dd92fcd
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
releng-tool/releng-tool
|
0fa8c44b864ee10e7a8c8eeb54af7acc62b3cd56
|
d05eb2153c72e9bd82c5fdddd5eb41d5316592d6
|
refs/heads/main
| 2023-08-22T09:52:12.341285
| 2023-08-06T21:27:18
| 2023-08-06T21:27:18
| 155,482,664
| 12
| 2
|
BSD-2-Clause
| 2023-01-03T06:21:00
| 2018-10-31T01:54:18
|
Python
|
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
# -*- coding: utf-8 -*-
# Copyright releng-tool
# SPDX-License-Identifier: BSD-2-Clause
from releng_tool.util.io import interim_working_dir
from releng_tool.util.io import opt_file
from releng_tool.util.io import run_script
from releng_tool.util.log import note
from releng_tool.util.log import verbose
import os
import sys
#: filename of the script to execute the post-processing operation (if any)
POST_SCRIPT = 'post'
def stage(engine, pkg, script_env): # noqa: ARG001
"""
handles the post-processing stage for a package
With a provided engine and package instance, the post-processing stage will
be processed. This stage is typically not advertised and is for advanced
cases where a developer wishes to manipulate their build environment after
package has completed each of its phases.
Args:
engine: the engine
pkg: the package being built
script_env: script environment information
Returns:
``True`` if the post-processing stage is completed; ``False`` otherwise
"""
verbose('post-processing {} (pre-check)...', pkg.name)
sys.stdout.flush()
post_script_filename = '{}-{}'.format(pkg.name, POST_SCRIPT)
post_script = os.path.join(pkg.def_dir, post_script_filename)
post_script, post_script_exists = opt_file(post_script)
if not post_script_exists:
return True
note('post-processing {}...', pkg.name)
sys.stdout.flush()
if pkg.build_subdir:
build_dir = pkg.build_subdir
else:
build_dir = pkg.build_dir
with interim_working_dir(build_dir):
if not run_script(post_script, script_env, subject='post-processing'):
return False
verbose('post-processing script executed: ' + post_script)
return True
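# Illustrative example (package name assumed): for a package named 'libfoo',
# this stage looks for a script called 'libfoo-post' in the package definition
# directory and, if present, runs it from within the package's build directory.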
|
[
"james.d.knight@live.com"
] |
james.d.knight@live.com
|
e73b0c2f931c70d88a494d81742b662b1f9f794a
|
d346c1e694e376c303f1b55808d90429a1ad3c3a
|
/easy/412.fizzBuzz.py
|
1d68ac4f80bf7dfcb5ab984bf7365445d85410af
|
[] |
no_license
|
littleliona/leetcode
|
3d06bc27c0ef59b863a2119cd5222dc94ed57b56
|
789d8d5c9cfd90b872be4a4c35a34a766d95f282
|
refs/heads/master
| 2021-01-19T11:52:11.938391
| 2018-02-19T03:01:47
| 2018-02-19T03:01:47
| 88,000,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 539
|
py
|
class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
#mine
L = []
for i in range(1, n+1):
if i%15 == 0:
L.append("FizzBuzz")
elif i%3 == 0:
L.append("Fizz")
elif i%5 == 0:
L.append("Buzz")
else:
L.append(str(i))
return L
#easy
return ['Fizz' * (not i % 3) + 'Buzz' * (not i % 5) or str(i) for i in range(1, n+1)]
s = Solution()
s.fizzBuzz(15)
|
[
"aria@Arias-MacBook-Pro.local"
] |
aria@Arias-MacBook-Pro.local
|
02bdabe52bc097773b81a49357863b67bc913c97
|
73332abdcadb62f4f262d0c30856c3c257a9ee7d
|
/tests/mixins/test_iomixin.py
|
5446c85b67adfa439bc9fec1f77da92295621017
|
[
"BSD-2-Clause"
] |
permissive
|
code-google-com/oyprojectmanager
|
454435604cc150c1b54ec2c54294e0fa05490f82
|
3085ecbe1cc04a73ec69b4848b789009546feae7
|
refs/heads/master
| 2021-01-19T02:40:56.342086
| 2015-01-26T16:40:00
| 2015-01-26T16:40:00
| 32,266,400
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,925
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2012, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
import shutil
import tempfile
import unittest
from sqlalchemy import Column, Integer
from oyProjectManager import conf, db
from oyProjectManager.db.declarative import Base
from oyProjectManager.models.link import FileLink
from oyProjectManager.models.mixins import IOMixin
class IOMixedInClass(Base, IOMixin):
"""A class which is mixed with IOMixin for testing purposes
"""
__tablename__ = "IOMixClasses"
IOMixClass_id = Column("id", Integer, primary_key=True)
def __init__(self, **kwargs):
super(IOMixedInClass, self).__init__(**kwargs)
IOMixin.__init__(self, **kwargs)
class IOMixedInClass2(Base, IOMixin):
"""A class which is mixed with IOMixin for testing purposes
"""
__tablename__ = "IOMixClasses2"
IOMixClass_id = Column("id", Integer, primary_key=True)
def __init__(self, **kwargs):
super(IOMixedInClass2, self).__init__(**kwargs)
IOMixin.__init__(self, **kwargs)
class IOMixinTester(unittest.TestCase):
"""tests the oyProjectManager.models.mixins.IOMixin class
"""
def setUp(self):
"""set up the test
"""
conf.database_url = "sqlite://"
# create the environment variable and point it to a temp directory
self.temp_config_folder = tempfile.mkdtemp()
self.temp_projects_folder = tempfile.mkdtemp()
os.environ["OYPROJECTMANAGER_PATH"] = self.temp_config_folder
os.environ[conf.repository_env_key] = self.temp_projects_folder
self.inputs = [
FileLink(
filename="a.%03d.tga 5-14 100",
path="/tmp"
),
FileLink(
filename='b.%03d.tga 1-100',
path="/tmp"
)
]
self.outputs = [
FileLink(
filename='Test_Proj_Test_Seq_SH001_MAIN_Lighting_beauty_MasterBeauty.%03d.exr 1-100',
path='/tmp'
),
FileLink(
filename='Test_Proj_Test_Seq_SH001_MAIN_Lighting_shadow_MasterBeauty.%03d.exr 1-100',
path='/tmp'
),
]
self.kwargs = {
"inputs": self.inputs,
"outputs": self.outputs,
}
self.test_io_mixed_in_obj = IOMixedInClass(**self.kwargs)
def tearDown(self):
"""clean up the test
"""
# set the db.session to None
db.session = None
# delete the temp folders
shutil.rmtree(self.temp_config_folder)
shutil.rmtree(self.temp_projects_folder)
def test_inputs_argument_is_skipped(self):
"""testing if skipping the inputs argument will set the inputs to an
empty list
"""
self.kwargs.pop('inputs')
new_obj = IOMixedInClass(**self.kwargs)
self.assertEqual(new_obj.inputs, [])
def test_inputs_argument_is_None(self):
"""testing if a TypeError will be raised when the inputs argument is
set to None
"""
self.kwargs['inputs'] = None
self.assertRaises(TypeError, IOMixedInClass, **self.kwargs)
def test_inputs_attribute_is_None(self):
"""testing if a TypeError will be raised when the inputs attribute is
set to None
"""
self.assertRaises(
TypeError,
setattr,
self.test_io_mixed_in_obj.inputs,
None
)
def test_inputs_argument_is_not_a_list_instance(self):
"""testing if a TypeError will be raised when the inputs argument is
not a list
"""
self.kwargs['inputs'] = 'not a list instance'
self.assertRaises(TypeError, IOMixedInClass, **self.kwargs)
def test_inputs_attribute_is_not_a_list_instance(self):
"""testing if a TypeError will be raised when the inputs attribute is
set to something other than a list
"""
self.assertRaises(TypeError, setattr, self.test_io_mixed_in_obj,
'inputs', 'not a list instance')
def test_inputs_argument_is_not_a_list_of_FileLink_instances(self):
"""testing if a TypeError will be raised when the inputs argument is
not a list of all FileLink instances
"""
self.kwargs['inputs'] = ['these', 'are', 'not', 'a', 'FileLink',
'instances']
self.assertRaises(TypeError, IOMixedInClass, **self.kwargs)
def test_inputs_attribute_is_not_a_list_of_FileLink_instances(self):
"""testing if a TypeError will be raised when the inputs attribute is
not set to a list of all FileLink instances
"""
self.assertRaises(
TypeError,
setattr,
self.test_io_mixed_in_obj, 'inputs',
['these', 'are', 'not', 'a', 'FileLink', 'instances']
)
def test_inputs_argument_is_working_properly(self):
"""testing if the inputs argument value is passed to inputs attribute
correctly
"""
self.assertEqual(
self.test_io_mixed_in_obj.inputs,
self.kwargs['inputs']
)
def test_inputs_attribute_is_working_properly(self):
"""testing if the inputs attribute is working properly
"""
new_FileLinks = [
FileLink('test.tga', '/tmp')
]
self.test_io_mixed_in_obj.inputs = new_FileLinks
self.assertEqual(
self.test_io_mixed_in_obj.inputs,
new_FileLinks
)
def test_outputs_argument_is_skipped(self):
"""testing if skipping the outputs argument will set the outputs to an
empty list
"""
self.kwargs.pop('outputs')
new_obj = IOMixedInClass(**self.kwargs)
self.assertEqual(new_obj.outputs, [])
def test_outputs_argument_is_None(self):
"""testing if a TypeError will be raised when the outputs argument is
set to None
"""
self.kwargs['outputs'] = None
self.assertRaises(TypeError, IOMixedInClass, **self.kwargs)
def test_outputs_attribute_is_None(self):
"""testing if a TypeError will be raised when the outputs attribute is
set to None
"""
self.assertRaises(
TypeError,
setattr,
self.test_io_mixed_in_obj.outputs,
None
)
def test_outputs_argument_is_not_a_list_instance(self):
"""testing if a TypeError will be raised when the outputs argument is
not a list
"""
self.kwargs['outputs'] = 'not a list instance'
self.assertRaises(TypeError, IOMixedInClass, **self.kwargs)
def test_outputs_attribute_is_not_a_list_instance(self):
"""testing if a TypeError will be raised when the outputs attribute is
set to something other than a list
"""
self.assertRaises(TypeError, setattr, self.test_io_mixed_in_obj,
'outputs', 'not a list instance')
def test_outputs_argument_is_not_a_list_of_FileLink_instances(self):
"""testing if a TypeError will be raised when the outputs argument is
not a list of all FileLink instances
"""
self.kwargs['outputs'] = ['these', 'are', 'not', 'a', 'FileLink',
'instances']
self.assertRaises(TypeError, IOMixedInClass, **self.kwargs)
def test_outputs_attribute_is_not_a_list_of_FileLink_instances(self):
"""testing if a TypeError will be raised when the outputs attribute is
not set to a list of all FileLink instances
"""
self.assertRaises(
TypeError,
setattr,
self.test_io_mixed_in_obj, 'outputs',
['these', 'are', 'not', 'a', 'FileLink', 'instances']
)
def test_outputs_argument_is_working_properly(self):
"""testing if the outputs argument value is passed to outputs attribute
correctly
"""
self.assertEqual(
self.test_io_mixed_in_obj.outputs,
self.kwargs['outputs']
)
def test_outputs_attribute_is_working_properly(self):
"""testing if the outputs attribute is working properly
"""
new_FileLinks = [
FileLink('test.tga', '/tmp')
]
self.test_io_mixed_in_obj.outputs = new_FileLinks
self.assertEqual(
self.test_io_mixed_in_obj.outputs,
new_FileLinks
)
class IOMixin_DB_Tester(unittest.TestCase):
"""tests IOMixin in a persistent environment
"""
def setUp(self):
"""set up the test
"""
conf.database_url = "sqlite://"
# create the environment variable and point it to a temp directory
self.temp_config_folder = tempfile.mkdtemp()
self.temp_projects_folder = tempfile.mkdtemp()
os.environ["OYPROJECTMANAGER_PATH"] = self.temp_config_folder
os.environ[conf.repository_env_key] = self.temp_projects_folder
self.inputs = [
FileLink(
filename="a.%03d.tga 5-14 100",
path="/tmp"
),
FileLink(
filename='b.%03d.tga 1-100',
path="/tmp"
)
]
self.outputs = [
FileLink(
filename='Test_Proj_Test_Seq_SH001_MAIN_Lighting_beauty_MasterBeauty.%03d.exr 1-100',
path='/tmp'
),
FileLink(
filename='Test_Proj_Test_Seq_SH001_MAIN_Lighting_shadow_MasterBeauty.%03d.exr 1-100',
path='/tmp'
),
]
self.kwargs = {
"inputs": self.inputs,
"outputs": self.outputs,
}
self.test_io_mixed_in_obj = IOMixedInClass(**self.kwargs)
def tearDown(self):
"""clean up the test
"""
# set the db.session to None
db.session = None
# delete the temp folders
shutil.rmtree(self.temp_config_folder)
shutil.rmtree(self.temp_projects_folder)
def test_persistence_of_IOMixin(self):
"""testing the persistence of IOMixedInClass
"""
db.setup()
db.session.add(self.test_io_mixed_in_obj)
db.session.commit()
# now delete the object and try to retrieve it back
del self.test_io_mixed_in_obj
io_mixed_in_obj_DB = db.session.query(IOMixedInClass).first()
# check the attributes
self.assertEqual(
io_mixed_in_obj_DB.inputs, self.kwargs['inputs']
)
self.assertEqual(
io_mixed_in_obj_DB.outputs, self.kwargs['outputs']
)
def test_another_class_mixed_in_with_IOMixin(self):
"""testing if everything works properly if more than one class is mixed
in with the IOMixin
"""
db.setup()
new_io_mixed_in_obj2 = IOMixedInClass2(**self.kwargs)
db.session.add(self.test_io_mixed_in_obj)
db.session.add(new_io_mixed_in_obj2)
db.session.commit()
# delete them and retrieve back from DB
del new_io_mixed_in_obj2
del self.test_io_mixed_in_obj
a = db.query(IOMixedInClass).first()
b = db.query(IOMixedInClass2).first()
self.assertEqual(a.inputs, self.kwargs['inputs'])
self.assertEqual(a.outputs, self.kwargs['outputs'])
self.assertEqual(b.inputs, self.kwargs['inputs'])
self.assertEqual(b.outputs, self.kwargs['outputs'])
|
[
"eoyilmaz@gmail.com"
] |
eoyilmaz@gmail.com
|
a74b1db5a96cc5517f157ef2a3dd75e49245b3eb
|
06e34e2dface0b87fa785cab7e65422a5f20ba18
|
/Solutions/165-Compare-Version-Numbers/python.py
|
048bbf21130c6b62a12402c9c0159f5b6c6472cd
|
[] |
no_license
|
JerryHu1994/LeetCode-Practice
|
c9841b0ce70451c19c8a429a3898c05b6233e1d4
|
b0ce69985c51a9a794397cd98a996fca0e91d7d1
|
refs/heads/master
| 2022-02-10T04:42:28.033364
| 2022-01-02T04:44:22
| 2022-01-02T04:44:22
| 117,118,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 726
|
py
|
class Solution(object):
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
v1list, v2list = [int(i) for i in version1.split(".")], [int(i) for i in version2.split(".")]
cmplen = min(len(v1list), len(v2list))
for i in range(cmplen):
if v1list[i] > v2list[i]:
return 1
elif v1list[i] < v2list[i]:
return -1
if len(v1list) == len(v2list): return 0
longer = 1 if len(v1list) > len(v2list) else -1
remain = v1list[cmplen:] + v2list[cmplen:]
return 0 if all([i==0 for i in remain]) else longer
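# Illustrative examples (not part of the original solution): Solution().compareVersion("1.0.0", "1")
# returns 0 because the extra components are all zero, while Solution().compareVersion("1.2", "1.10")
# returns -1 because components are compared numerically and 2 < 10.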
|
[
"hjr01211@gmail.com"
] |
hjr01211@gmail.com
|
9c4db63f3fccff9b34f02d52409ad971da14fb48
|
069a4ac8e931b77571f90fcc845b2c88ce18e069
|
/Chapter5/stack_queue_notes.py
|
4b2ebe26b0e772e5387d2cc623594c35e635490c
|
[
"Apache-2.0"
] |
permissive
|
qimanchen/Algorithm_Python
|
0f60c38e01119d7a99469f76194fdeb363008229
|
72eabb5fcc9fafb17172879c1250d3c9553e583d
|
refs/heads/master
| 2020-04-28T17:16:28.520904
| 2020-01-31T07:27:48
| 2020-01-31T07:27:48
| 175,440,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,233
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
栈和队列:
容器 包含 元素(其他数据结构)
只支持数据项的存储和访问,不支持数据项之间的任何关系
最重要的功能: 元素的存入和取出
两种访问顺序:
先进先出
后进先出
栈概念:
元素之间只有时间的先后顺序关系,而无其他关系
后进先出
应用:
前缀表达式: 每个运算符的运算对象,就是它后面出现的几个完整表达式
后缀表达式: 与前面相反
栈与函数调用:
1、进入新的函数调用之前,保存一些信息 -- 函数调用的前序动作
2、退出上一次函数调用,需要恢复调用前的状态 -- 函数调用的后序动作
因此函数调用是有代价的
任何一个递归定义的函数,都可以通过引入一个栈保存中间结果的方式,翻译为一个非递归的过程
递归 -- 涉及函数的调用(消耗资源)
转化
非递归 -- 减少函数调用的开销
任何包含循环的程序翻译为不包含循环的递归定义
队列:
queue -- 容器
单链表可以直接实现 -- 先进先出(直接首端操作)
假性溢出
通过顺序表实现队列 -- 通过循环队列实现
简单实现通过固定大小的list
数据不变式:维护对象属性间的正确关系
基于栈的搜索 -- 深度优先搜索 -- 单条路径找个遍
基于队列的搜索 -- 广度优先搜索 -- 多条路径的进行
深度优先:
总是沿着遇到的搜索路径一路前行
当分支节点对不同分支的选择非常重要;问题简单,没有其他额外的帮助信息
状态空间小时使用
解:
可以通过栈来保存
广度优先:
只要存在达解的有穷长路径 -- 必定找到最短的路径(最近的解)
解:
需要额外的方法进行记录
时间开销 -- 访问的状态个数
几种特殊的栈与对列:
1、双端对列 --- python 中的collections包中定义了一种deque类型 -- python版的双端队列
链接表带来灵活性,但是失去了一定的效率
cpu需要整块的分级缓存单元
"""
|
[
"1033178199@qq.com"
] |
1033178199@qq.com
|
aa9c859721f3cf61a743cb7f21d7af7caf49b2b0
|
cf2959812b89424dfc537d3df4b4e64f7b50cd60
|
/tests/test_issues.py
|
881b8fd4a50a188b5e17b383c2be12b212cd3ef5
|
[
"Apache-2.0"
] |
permissive
|
Pandziura/PyRFC
|
1c91808f6897d56743a5a7e66fb24e938c672960
|
0718eeb73d45732283d117f33e3395d35e4b2795
|
refs/heads/master
| 2020-03-23T04:54:20.141190
| 2018-07-13T13:20:25
| 2018-07-13T13:20:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,502
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import socket
import pyrfc
import pytest
from tests.config import PARAMS as params, CONFIG_SECTIONS as config_sections, get_error
def utf8len(s):
return len(s.encode('utf-8'))
class TestIssues():
def setup_method(self, test_method):
""" A connection to an SAP backend system
Instantiating an :class:`pyrfc.Connection` object will
        automatically attempt to open a connection to the SAP backend.
:param config: Configuration of the instance. Allowed keys are:
``dtime``
returns datetime types (accepts strings and datetimes), default is False
``rstrip``
right strips strings returned from RFC call (default is True)
``return_import_params``
importing parameters are returned by the RFC call (default is False)
:type config: dict or None (default)
"""
self.conn = pyrfc.Connection(**params)
assert self.conn.alive
def test_info(self):
connection_info = self.conn.get_connection_attributes()
assert connection_info['isoLanguage'] == u'EN'
def teardown_method(self, test_method):
self.conn.close()
assert not self.conn.alive
def test_issue31(self):
"""
This test cases covers the issue 31
"""
'''
filename = 'tests/data/issue31/rfcexec.exe'
block = 1024
with open(filename, 'rb') as file1:
send = file1.read()
send_content = [{'': bytearray(send[i:i+block])} for i in range(0, len(send), block)]
result = self.conn.call('ZTEST_RAW_TABLE', TT_TBL1024=send_content)
content = bytearray()
for line in send_content:
content += line['']
assert send == content
received_content = bytearray()
for line in result['TT_TBL1024']:
received_content += line['LINE']
assert type(content) is bytearray
assert type(content) == type(received_content)
received_content = received_content[:len(content)]
assert len(content) == len(received_content)
assert content == received_content
'''
def test_issue38(self):
test = [
'string',
u'四周远处都能望见',
u'\U0001F4AA',
u'\u0001\uf4aa',
u'a\xac\u1234\u20ac\U0001F4AA'
]
for s in test:
is_input = {'ZSHLP_MAT1': s, 'ZFLTP': 123.45}
result = self.conn.call('/COE/RBP_FE_DATATYPES', IS_INPUT = is_input)['ES_OUTPUT']
assert is_input['ZSHLP_MAT1'] == result['ZSHLP_MAT1']
def test_issue40(self):
'''
# put in cache
result = self.conn.call('BAPI_USER_GET_DETAIL', USERNAME="DEMO")
# get from cache
fd = self.conn.func_desc_get_cached('S16', 'BAPI_USER_GET_DETAIL')
assert fd.__class__ is pyrfc._pyrfc.FunctionDescription
# remove from cache
self.conn.func_desc_remove('S16', 'BAPI_USER_GET_DETAIL')
try:
fd = self.conn.func_desc_get_cached('S16', 'BAPI_USER_GET_DETAIL')
assert fd.__class__ is not 'pyrfc._pyrfc.FunctionDescription'
except pyrfc.RFCError as ex:
error = get_error(ex)
assert error['code'] == 17
assert error['key'] == 'RFC_NOT_FOUND'
'''
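# Hedged sketch (not part of the original tests): the `config` argument documented in
# setup_method above, combined with the connection parameters imported from tests.config.
def open_connection_with_config():
    return pyrfc.Connection(config={'dtime': True, 'rstrip': True}, **params)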
|
[
"srdjan.boskovic@sap.com"
] |
srdjan.boskovic@sap.com
|
e92416720e9d10d9c0f591929294d10b632b2e17
|
77b16dcd465b497c22cf3c096fa5c7d887d9b0c2
|
/Francisco_Trujillo/Assignments/flaskolympics/olympics6/server.py
|
29f43c7f8562278fb156e038802a32014712f696
|
[
"MIT"
] |
permissive
|
curest0x1021/Python-Django-Web
|
a7cf8a45e0b924ce23791c18f6a6fb3732c36322
|
6264bc4c90ef1432ba0902c76b567cf3caaae221
|
refs/heads/master
| 2020-04-26T17:14:20.277967
| 2016-10-18T21:54:39
| 2016-10-18T21:54:39
| 173,706,702
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
from flask import Flask, render_template, session, request, redirect
import random
app = Flask(__name__)
app.secret_key = 'my_secret_key'
@app.route('/')
def index():
if not 'gold' in session:
session['gold'] = 0
if not 'activities' in session:
session['activities'] = []
return render_template('index.html')
@app.route('/process', methods = ['POST'])
def process():
buildings = {
'farm':random.randint(5,10),
'casino':random.randint(-50,50),
'cave':random.randint(0,30),
'house':random.randint(0,5)
}
return redirect('/')
if __name__ == '__main__':
app.run(debug = True)
"""
Will this work?
at first we would import the random from function to work, but nothing would happen to session data.
"""
|
[
"43941751+curest0x1021@users.noreply.github.com"
] |
43941751+curest0x1021@users.noreply.github.com
|
c6c5b98d66abced8eee513b103a30429094cface
|
6879a8596df6f302c63966a2d27f6b4d11cc9b29
|
/abc/problems030/028/c.py
|
33699ae14e6f2767c00684d9ab5c770609c9fcae
|
[] |
no_license
|
wkwkgg/atcoder
|
41b1e02b88bf7a8291b709306e54cb56cb93e52a
|
28a7d4084a4100236510c05a88e50aa0403ac7cd
|
refs/heads/master
| 2020-07-26T03:47:19.460049
| 2020-03-01T18:29:57
| 2020-03-01T18:29:57
| 208,523,188
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
# ABC028 : C - Pick three numbers (数を3つ選ぶマン)
from itertools import combinations
ins = list(map(int, input().split()))
res = []
for xs in combinations(ins, 3):
res.append(sum(xs))
print(sorted(res, reverse=True)[2])
|
[
"yujin@komachi.live"
] |
yujin@komachi.live
|
022c0a5b59b6ab927a9a5f4463dd7ea34fc79202
|
3624e9f0a026b57ebdafa4e842b93f56e5a8504d
|
/Codeforces/54 Beta Division 2/Problem A/A.py
|
e71f611141ffce1faa0d2fef75b77557ad7af796
|
[
"MIT"
] |
permissive
|
ailyanlu1/Competitive-Programming-2
|
54109c8644d3ac02715dc4570916b212412c25c0
|
6c990656178fb0cd33354cbe5508164207012f24
|
refs/heads/master
| 2020-03-23T07:48:20.560283
| 2018-02-15T06:49:49
| 2018-02-15T06:49:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 365
|
py
|
s = raw_input()
l = len(s)
if 'h' in s:
i = s.index('h')
s = s[i+1:]
if 'e' in s:
i = s.index('e')
s = s[i+1:]
if 'l' in s:
i = s.index('l')
s = s[i+1:]
if 'l' in s:
i = s.index('l')
s = s[i+1:]
if 'o' in s:
print "YES"
else:
print "NO"
else:
print "NO"
else:
print "NO"
else:
print "NO"
else:
print "NO"
|
[
"adityapaliwal95@gmail.com"
] |
adityapaliwal95@gmail.com
|
10a59fb483b870d8565088d936df3af229200397
|
e5e2b7da41fda915cb849f031a0223e2ac354066
|
/sdk/python/pulumi_azure_native/servicebus/v20180101preview/namespace_authorization_rule.py
|
dae834124429d04d4271d6518385cfa5be49f153
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
johnbirdau/pulumi-azure-native
|
b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25
|
d676cc331caa0694d8be99cb90b93fa231e3c705
|
refs/heads/master
| 2023-05-06T06:48:05.040357
| 2021-06-01T20:42:38
| 2021-06-01T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,831
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['NamespaceAuthorizationRuleArgs', 'NamespaceAuthorizationRule']
@pulumi.input_type
class NamespaceAuthorizationRuleArgs:
def __init__(__self__, *,
namespace_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
rights: pulumi.Input[Sequence[pulumi.Input['AccessRights']]],
authorization_rule_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a NamespaceAuthorizationRule resource.
:param pulumi.Input[str] namespace_name: The namespace name
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[Sequence[pulumi.Input['AccessRights']]] rights: The rights associated with the rule.
:param pulumi.Input[str] authorization_rule_name: The authorization rule name.
"""
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "rights", rights)
if authorization_rule_name is not None:
pulumi.set(__self__, "authorization_rule_name", authorization_rule_name)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
The namespace name
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the Resource group within the Azure subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def rights(self) -> pulumi.Input[Sequence[pulumi.Input['AccessRights']]]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@rights.setter
def rights(self, value: pulumi.Input[Sequence[pulumi.Input['AccessRights']]]):
pulumi.set(self, "rights", value)
@property
@pulumi.getter(name="authorizationRuleName")
def authorization_rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The authorization rule name.
"""
return pulumi.get(self, "authorization_rule_name")
@authorization_rule_name.setter
def authorization_rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authorization_rule_name", value)
class NamespaceAuthorizationRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rights: Optional[pulumi.Input[Sequence[pulumi.Input['AccessRights']]]] = None,
__props__=None):
"""
Description of a namespace authorization rule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authorization_rule_name: The authorization rule name.
:param pulumi.Input[str] namespace_name: The namespace name
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[Sequence[pulumi.Input['AccessRights']]] rights: The rights associated with the rule.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NamespaceAuthorizationRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Description of a namespace authorization rule.
:param str resource_name: The name of the resource.
:param NamespaceAuthorizationRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NamespaceAuthorizationRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rights: Optional[pulumi.Input[Sequence[pulumi.Input['AccessRights']]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NamespaceAuthorizationRuleArgs.__new__(NamespaceAuthorizationRuleArgs)
__props__.__dict__["authorization_rule_name"] = authorization_rule_name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if rights is None and not opts.urn:
raise TypeError("Missing required property 'rights'")
__props__.__dict__["rights"] = rights
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:servicebus/v20180101preview:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:servicebus:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:servicebus:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:servicebus/v20140901:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:servicebus/v20140901:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:servicebus/v20150801:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:servicebus/v20150801:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:servicebus/v20170401:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:servicebus/v20170401:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-native:servicebus/v20210101preview:NamespaceAuthorizationRule"), pulumi.Alias(type_="azure-nextgen:servicebus/v20210101preview:NamespaceAuthorizationRule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NamespaceAuthorizationRule, __self__).__init__(
'azure-native:servicebus/v20180101preview:NamespaceAuthorizationRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NamespaceAuthorizationRule':
"""
Get an existing NamespaceAuthorizationRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NamespaceAuthorizationRuleArgs.__new__(NamespaceAuthorizationRuleArgs)
__props__.__dict__["name"] = None
__props__.__dict__["rights"] = None
__props__.__dict__["type"] = None
return NamespaceAuthorizationRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def rights(self) -> pulumi.Output[Sequence[str]]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
johnbirdau.noreply@github.com
|
a03cd0b74173e423e1504dfeef024bbbf613678d
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/eventhub/v20150801/event_hub.py
|
c58558ab4ee10df9503de6c7df040f6b0f34cf5d
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 7,517
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['EventHub']
class EventHub(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
event_hub_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
message_retention_in_days: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
partition_count: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Single item in List or Get Event Hub operation
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] event_hub_name: The Event Hub name
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[int] message_retention_in_days: Number of days to retain the events for this Event Hub.
:param pulumi.Input[str] name: Name of the Event Hub.
:param pulumi.Input[str] namespace_name: The Namespace name
:param pulumi.Input[int] partition_count: Number of partitions created for the Event Hub.
:param pulumi.Input[str] resource_group_name: Name of the resource group within the azure subscription.
:param pulumi.Input[str] status: Enumerates the possible values for the status of the Event Hub.
:param pulumi.Input[str] type: ARM type of the Namespace.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if event_hub_name is None:
raise TypeError("Missing required property 'event_hub_name'")
__props__['event_hub_name'] = event_hub_name
if location is None:
raise TypeError("Missing required property 'location'")
__props__['location'] = location
__props__['message_retention_in_days'] = message_retention_in_days
__props__['name'] = name
if namespace_name is None:
raise TypeError("Missing required property 'namespace_name'")
__props__['namespace_name'] = namespace_name
__props__['partition_count'] = partition_count
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['type'] = type
__props__['created_at'] = None
__props__['partition_ids'] = None
__props__['updated_at'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:eventhub/latest:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20140901:EventHub"), pulumi.Alias(type_="azure-nextgen:eventhub/v20170401:EventHub")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(EventHub, __self__).__init__(
'azure-nextgen:eventhub/v20150801:EventHub',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'EventHub':
"""
Get an existing EventHub resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return EventHub(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
Exact time the Event Hub was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="messageRetentionInDays")
def message_retention_in_days(self) -> pulumi.Output[Optional[int]]:
"""
Number of days to retain the events for this Event Hub.
"""
return pulumi.get(self, "message_retention_in_days")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partitionCount")
def partition_count(self) -> pulumi.Output[Optional[int]]:
"""
Number of partitions created for the Event Hub.
"""
return pulumi.get(self, "partition_count")
@property
@pulumi.getter(name="partitionIds")
def partition_ids(self) -> pulumi.Output[Sequence[str]]:
"""
Current number of shards on the Event Hub.
"""
return pulumi.get(self, "partition_ids")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
Enumerates the possible values for the status of the Event Hub.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
The exact time the message was updated.
"""
return pulumi.get(self, "updated_at")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
5666f7847d1dd2aede0d1d0572c957bc26bf50ed
|
14d940630ab365be939fc08d3d95b0a98789bae7
|
/lab103_robo_testes.py
|
53c976a5f967be537db278f7a9d6ac845b034565
|
[] |
no_license
|
accolombini/python_completo
|
1da6f58f0c57b978d70582d96dc12b80c2d5b8a8
|
935102173a1112273b09734392dca08d76e9c749
|
refs/heads/master
| 2023-01-09T07:51:15.494101
| 2020-10-11T23:39:08
| 2020-10-11T23:39:08
| 283,790,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,319
|
py
|
"""
# Motivation -> look at these tests before they were refactored -> note how the code repeats
class RoboTestes(unittest.TestCase):
def test_carregar(self):
megaman = Robo('Mega Man', bateria=50)
megaman.carregar()
self.assertEqual(megaman.bateria, 100)
def test_dizer_nome(self):
megaman = Robo('Mega Man', bateria=50)
self.assertEqual(megaman.dizer_nome(), 'BEEP BOOP BEEP BOOP. Eu sou MEGA MAN')
self.assertEqual(megaman.bateria, 49, 'A bateria deveria estar em 49%')
"""
import unittest
from lab103_robo import Robo
# Refactored using setUp() and tearDown(). Note that setUp() creates an object and makes it
# available to all of the test methods
class RoboTestes(unittest.TestCase):
def setUp(self):
self.megaman = Robo('Mega Man', bateria=50)
print(f'setUp() sendo executado ...')
def test_carregar(self):
self.megaman.carregar()
self.assertEqual(self.megaman.bateria, 100)
def test_dizer_nome(self):
self.assertEqual(self.megaman.dizer_nome(), 'BEEP BOOP BEEP BOOP. Eu sou MEGA MAN')
self.assertEqual(self.megaman.bateria, 49, 'A bateria deveria estar em 49%')
def tearDown(self):
print(f'tearDown() sendo executado ...')
if __name__ == '__main__':
unittest.main()
|
[
"accolombini@gmail.com"
] |
accolombini@gmail.com
|
56bae0ea261cac580770b5cc789b04b6b2ad0c17
|
a275cec1fddb6e034b4e9df72f8039536c009990
|
/codes/leetcode/merge-sorted-array.py
|
0ddbcf9899003e159cb71c435e5f14e50ece09e7
|
[] |
no_license
|
taoste/dirtysalt
|
a3cbd16710c81de65f00aa919f4e67a1fc66d226
|
bd68294fb7727d598ea1c8bf0a559247e07c1aea
|
refs/heads/master
| 2021-08-24T10:44:47.607924
| 2017-12-09T08:47:12
| 2017-12-09T08:47:12
| 113,807,519
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
#!/usr/bin/env python
# coding:utf-8
# Copyright (C) dirlt
class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
i = m - 1
j = n - 1
k = m + n - 1
while i >= 0 and j >= 0:
if nums1[i] > nums2[j]:
nums1[k] = nums1[i]
i -= 1
else:
nums1[k] = nums2[j]
j -= 1
k -= 1
while i >= 0:
nums1[k] = nums1[i]
i -= 1
k -= 1
while j >= 0:
nums1[k] = nums2[j]
j -= 1
k -= 1
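# Illustrative example (not part of the original solution): with nums1 = [1, 3, 5, 0, 0],
# m = 3, nums2 = [2, 4] and n = 2, Solution().merge(nums1, 3, [2, 4], 2) fills nums1 from
# the back and leaves it as [1, 2, 3, 4, 5].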
|
[
"dirtysalt1987@gmail.com"
] |
dirtysalt1987@gmail.com
|
d7223e33cf1a53d89ca0729366e2b0ddfc6f1740
|
d2e82d8bc2a4604b6e734f7521ddae2716486b96
|
/20190521/futureData_model4/record/draw_record.py
|
bf0bc758b03291cdde27003399b6195b2623e921
|
[] |
no_license
|
JudyPhy/spider
|
af74dbf8b74b335b64247b382e73b669796e5c1a
|
eb32aab272269f13a97ecea17eb6135f9e7e3d49
|
refs/heads/master
| 2021-07-05T19:07:50.427310
| 2020-08-14T09:29:49
| 2020-08-14T09:29:49
| 159,917,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,312
|
py
|
from common import common
def __getDrawRecords(draw, history_raceResults_rows):
draw_records = [0, 0, 0, 0, 0] # [No1, No2, No3, No4, All]
    for race_date_No, results_by_horse in history_raceResults_rows.items():
        for horse_code, row in results_by_horse.items():
plc = row['plc'].replace('DH', '')
cur_draw = row['draw']
if (plc not in common.words) and (int(cur_draw) == draw):
draw_records[4] += 1
if int(plc) == 1:
draw_records[0] += 1
elif int(plc) == 2:
draw_records[1] += 1
elif int(plc) == 3:
draw_records[2] += 1
elif int(plc) == 4:
draw_records[3] += 1
return draw_records
def GetDrawRecord(future_raceCard_rows, history_raceResults_rows):
draw_record_dict = {} # draw & [No1, No2, No3, No4, All]
draw_list = []
    for race_date_No, card_rows in future_raceCard_rows.items():
        for horse_No, row in card_rows.items():
draw = int(row['draw'])
if draw not in draw_list:
draw_list.append(draw)
for draw in draw_list:
draw_record_dict[draw] = __getDrawRecords(draw, history_raceResults_rows)
return draw_record_dict
|
[
"pujie@chinatsp.com"
] |
pujie@chinatsp.com
|
97c069ca064fb33b7d4719f5d2073453efe1716a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_296/ch6_2019_04_22_18_58_02_325401.py
|
499aadc8a44219fa30fdbadc9ec1e86a5ba174aa
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
def encontra_maximo(a):
    # Returns the largest element of the list (0 for an empty list).
    if not a:
        return 0
    b = a[0]
    for x in a:
        if x > b:
            b = x
    return b
lista = []
print(encontra_maximo(lista))
|
[
"you@example.com"
] |
you@example.com
|
0805d0d3bab7db888f1100aaa875e33667988239
|
1f7fce552cc68731f683ded3f831e8f4650c7197
|
/Axis16/main/migrations/0009_kartavyaregistration.py
|
1be3f60f6b5585457a1db0143d9da9c6a1c78c22
|
[] |
no_license
|
tanaypatil/axis-website
|
3985068cf1c52bb038b7174cbdf938b8b4084c03
|
b5eda2906150a38b1bb0daf8b23c9194572b849c
|
refs/heads/master
| 2020-06-13T03:14:05.855948
| 2019-06-30T13:12:11
| 2019-06-30T13:12:11
| 194,514,303
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,426
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-15 08:24
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_ornithoregistration'),
]
operations = [
migrations.CreateModel(
name='KartavyaRegistration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idnum', models.CharField(default=None, max_length=20)),
('team', models.CharField(default=None, max_length=20, unique=True)),
('fname', models.CharField(max_length=40)),
('fcollege', models.CharField(max_length=60)),
('fmail', models.EmailField(default=None, max_length=254, unique=True)),
('fcon', models.CharField(default=None, max_length=12, unique=True, validators=[django.core.validators.RegexValidator('^[0-9]+$', 'Enter a valid phone number.')])),
('fcity', models.CharField(max_length=12, null=True)),
('sname', models.CharField(blank=True, default=None, max_length=40, null=True)),
('scollege', models.CharField(blank=True, default=None, max_length=60, null=True)),
('smail', models.EmailField(blank=True, default=None, max_length=254, null=True)),
('scon', models.CharField(blank=True, default=None, max_length=12, null=True, validators=[django.core.validators.RegexValidator('^[0-9]+$', 'Enter a valid phone number.')])),
('scity', models.CharField(blank=True, default=None, max_length=12, null=True)),
('tname', models.CharField(blank=True, default=None, max_length=40, null=True)),
('tcollege', models.CharField(blank=True, default=None, max_length=60, null=True)),
('tmail', models.EmailField(blank=True, default=None, max_length=254, null=True)),
('tcon', models.CharField(blank=True, default=None, max_length=12, null=True, validators=[django.core.validators.RegexValidator('^[0-9]+$', 'Enter a valid phone number.')])),
('tcity', models.CharField(blank=True, default=None, max_length=12, null=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"tanaypatil197@gmail.com"
] |
tanaypatil197@gmail.com
|
6b785ccf6de401b6de6be7b80935b38f0153b522
|
8a6cf531ed72310d7114237407302ef075171937
|
/ores/ores.py
|
8b81ab5f852c87b9c7043f4eebd231d7dbbbe645
|
[
"MIT"
] |
permissive
|
ureesoriano/ores
|
64a7f3c8a8917fe33449302c55cff23952a5719c
|
dda9db6c8737d12acbae5b0d43938d93c9e7ea8e
|
refs/heads/master
| 2020-03-17T21:54:12.610518
| 2018-05-20T08:36:13
| 2018-05-20T08:36:13
| 133,980,352
| 0
| 0
|
MIT
| 2018-05-18T16:43:18
| 2018-05-18T16:43:18
| null |
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
"""
This script provides access to a set of utilities for ORES
* precached -- Starts a daemon that requests scores for revisions as they happen
* score_revisions -- Scores a set of revisions using an ORES API
* stress_test -- Scores a large set of revisions at a configurable rate
* test_api -- Runs a series of tests against a live ORES API
You can also launch a set of production like applications
* applications.wsgi -- A wsgi server
* applications.celery -- A celery worker
{usage}
Options:
-h | --help Shows this documentation
<utility> The name of the utility to run
"""
import sys
import traceback
from importlib import import_module
USAGE = """Usage:
{progname} (-h | --help)
{progname} <utility> [-h | --help]
""".format(progname=sys.argv[0])
def main():
if len(sys.argv) < 2:
sys.stderr.write(USAGE)
sys.exit(1)
elif sys.argv[1] in ("-h", "--help"):
sys.stderr.write(__doc__.format(usage=USAGE))
sys.exit(1)
elif sys.argv[1][:1] == "-":
sys.stderr.write(USAGE)
sys.exit(1)
module_name = sys.argv[1]
if module_name.find("application") == 0:
module_path = "." + module_name
else:
module_path = ".utilities." + module_name
try:
sys.path.insert(0, ".")
module = import_module(module_path, package="ores")
except ImportError:
sys.stderr.write(traceback.format_exc())
sys.stderr.write("Could not find module {0}.\n".format(module_path))
sys.exit(1)
module.main(sys.argv[2:])
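# Hedged usage note (not part of the original module): `python ores.py test_api -h` resolves
# module_path to ".utilities.test_api", imports ores.utilities.test_api and calls its main()
# with ["-h"]; `python ores.py applications.wsgi` imports ores.applications.wsgi instead,
# because the argument starts with "application".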
|
[
"aaron.halfaker@gmail.com"
] |
aaron.halfaker@gmail.com
|
f3b095aab2099cf1a4956081a8357cf4160ac645
|
9ac16f3a952475715756cd4985e9355c6c0059b6
|
/docker/app/app/backend/apps/_archive/accounts_new/profiles/models.py
|
ccc1478f984975ead9d71f229d8be54404e828e1
|
[
"BSD-3-Clause",
"ISC"
] |
permissive
|
JTarball/docker-django-polymer-starter-kit
|
14a9900bb1f4402ffffaf8a428fd600d2430d35c
|
b5250030b1646e29567c15d01ba4668c6ad535c9
|
refs/heads/master
| 2021-01-19T22:14:36.890793
| 2015-11-18T18:49:14
| 2015-11-18T18:49:14
| 46,353,679
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,218
|
py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractUser
class AccountsUser(AbstractUser):
    USERNAME_FIELD = 'username' # name of field on the User that is used as the unique identifier.
activation_key = models.CharField(_('activation key'), max_length=40)
# Extra Profile Fields
    is_subscribed = models.BooleanField(_('subscribed'), default=False, help_text=_('Designates whether the user is subscribed to the newsletter.'))
###########################################################################
# Note Django User has the following fields so dont Duplicate!
###########################################################################
# id
# username
# first_name
# last_name
# email
# password
# is_staff
# is_active
# is_superuser
# last_login
# date_joined
###########################################################################
# future
#bio = models.TextField()
#failed_login_attempts = models.PositiveIntegerField(default=0, editable=False)
#last_login_attempt_ip = models.CharField(default='', max_length=45, editable=False)
|
[
"james.tarball@gmail.com"
] |
james.tarball@gmail.com
|
aeee10979dd5fd6b4e0388c3fdfc64f99aa5f61d
|
9d4ed31ebe11eb3fa40b7ab809e40762446708e0
|
/Python diye Programming sekha 2nd/Tracking mails.py
|
2fe4654d874fd2edd55cfbcbc3d191bb4e519754
|
[
"MIT"
] |
permissive
|
mitul3737/My-Python-Programming-Journey-from-Beginning-to-Data-Sciene-Machine-Learning-AI-Deep-Learning
|
299dc5abbfc98ea66cda94b2b3de31ac32ab2b3c
|
ca2c15c597a64e5a7689ba3a44ce36a1c0828194
|
refs/heads/main
| 2023-04-17T20:49:03.252440
| 2021-05-16T19:07:16
| 2021-05-16T19:07:16
| 360,768,274
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
text="Email us for any feedback here: shahriyarmitul3737@gmail.com py.book@subeen.com book_py@subeen.com thank you"
import re
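# The pattern [.\w]+@\w+[.]\w+ matches a run of word characters or dots, an "@", a domain
# label, a literal "." and a top-level domain, so findall pulls the three addresses out of
# the text above.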
print(re.findall(r'[.\w]+@\w+[.]\w+',text))
|
[
"shahriyarmitul3737@gmail.com"
] |
shahriyarmitul3737@gmail.com
|
df45bd3b7577e0039d27c223860742502e62e05f
|
4ea832d725d820b0c3796a87cdb9f763a8b657cd
|
/MyTensorFlow/utils.py
|
8658fe74d9d950be224ba88447c47f03c2a9b5a1
|
[] |
no_license
|
Ollitros/DataScienceProject
|
07cc6b9577ae63eb3aede152e46d4dd5a07f8a09
|
b14e6add0c929a0820647e8d085e0c1e131d573e
|
refs/heads/master
| 2022-02-28T01:09:43.441967
| 2019-10-05T14:51:47
| 2019-10-05T14:51:47
| 120,162,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,778
|
py
|
import numpy
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [
(ishp + tsp) * tshp - tsp
for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)
]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in range(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = numpy.zeros(
out_shape,
dtype=dt
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = numpy.zeros(out_shape, dtype=dt)
for tile_row in range(tile_shape[0]):
for tile_col in range(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
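if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): tile 100 flattened 28x28
    # images into a 10x10 grid with 1-pixel spacing; the random data is purely illustrative.
    X = numpy.random.rand(100, 28 * 28)
    tiles = tile_raster_images(X, img_shape=(28, 28), tile_shape=(10, 10),
                               tile_spacing=(1, 1))
    # Each dimension is (28 + 1) * 10 - 1 = 289; dtype is uint8 because output_pixel_vals=True.
    print(tiles.shape, tiles.dtype)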
|
[
"Ollitros@gmail.com"
] |
Ollitros@gmail.com
|
68be9becb1f195d995195e96a0a50ba5b6a58bce
|
2a788f6e0db36ea2565e6b4b161827e31cc968b7
|
/test/test_convert.py
|
c00c417416589d4931e0e7b63a2f89583b2675de
|
[] |
no_license
|
djairdutra/scan-pdf
|
354626f334d471e5fe4a42fff630ac676f76a325
|
0d2d96a3bbb0b97f01b93c1a290b296e85c21d37
|
refs/heads/master
| 2020-05-28T03:03:36.338043
| 2018-11-07T22:32:43
| 2018-11-07T22:32:43
| 188,862,507
| 0
| 1
| null | 2019-05-27T14:52:05
| 2019-05-27T14:52:04
| null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
import unittest
import mock
import sys
from assertpy import assert_that
class Options(object):
pass
class ConverterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
sys.modules['subprocess'] = mock.Mock()
def test_convert(self):
from scan_pdf import Converter
options = Options()
options.color_mode = 'bw'
options.resolution = 300
converter = Converter(options)
result = converter.convert('base', '.suffix')
import subprocess
subprocess.call.assert_called_with(['convert', '-depth', '1', '-density', '300', '-compress', 'zip', 'base.suffix', 'base.pdf'])
assert_that(result).is_equal_to(subprocess.call.return_value)
|
[
"andi@tryb.de"
] |
andi@tryb.de
|
9efef8515a5846ac87f18087a9c5a37d21d94e2b
|
2bdad552a0739f39b647678938a3c79b2fdde5fe
|
/src/old_code/mini_models/mini_models.py
|
54bf9a31a1503ac7b23d77181cafa9f45d4bc69a
|
[
"MIT"
] |
permissive
|
embeddedsamurai/single_shot_multibox_detector
|
7b59e8c0a5cadaff650896764edfef18b38f344d
|
2807da10b6e994ae72c1f287b0dfbf2f3f9116f9
|
refs/heads/master
| 2021-01-20T01:38:50.331431
| 2017-03-29T18:12:26
| 2017-03-29T18:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,876
|
py
|
import keras.backend as K
from keras.applications import VGG16
from keras.layers import Activation
from keras.layers import Convolution2D
from keras.layers import Dropout
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.layers import merge
from keras.layers import Reshape
from keras.models import Model
from layers import PriorBox2 as PriorBox
def mini_SSD(num_classes=21):
base_kernel_size = 4 + num_classes
aspect_ratios = (1, 2, 1/2)
num_aspect_ratios = len(aspect_ratios)
base_model = VGG16(weights='imagenet')
base_model.layers[0].name = 'input_1'
input_tensor = base_model.input
#input_tensor = base_model
#input_tensor.name = 'image_array'
for layer in base_model.layers:
layer.trainable = False
body = base_model.get_layer('block4_pool').output
body = Convolution2D((base_kernel_size * num_aspect_ratios), 3, 3,
border_mode='same')(body)
branch_1 = PriorBox(aspect_ratios)(body)
body = Convolution2D(32, 3, 3, border_mode='same')(branch_1)
body = Activation('relu')(body)
body = MaxPooling2D((2, 2))(body)
body = Dropout(.5)(body)
body = Convolution2D((base_kernel_size * num_aspect_ratios), 3, 3,
border_mode='same')(body)
branch_2 = PriorBox(aspect_ratios)(body)
body = Convolution2D(64, 3, 3, border_mode='same')(branch_2)
body = Activation('relu')(body)
body = MaxPooling2D((3, 3))(body)
body = Dropout(.5)(body)
body = Convolution2D((base_kernel_size * num_aspect_ratios), 3, 3,
border_mode='same')(body)
branch_3 = PriorBox(aspect_ratios)(body)
branch_1 = Reshape((-1, 4 + num_classes))(branch_1)
local_1 = Lambda(lambda x: x[:, :, :4])(branch_1)
class_1 = Lambda(lambda x: K.softmax(x[:, :, 4:]))(branch_1)
branch_2 = Reshape((-1, 4 + num_classes))(branch_2)
local_2 = Lambda(lambda x: x[:, :, :4])(branch_2)
class_2 = Lambda(lambda x: K.softmax(x[:, :, 4:]))(branch_2)
branch_3 = Reshape((-1, 4 + num_classes))(branch_3)
local_3 = Lambda(lambda x: x[:, :, :4])(branch_3)
class_3 = Lambda(lambda x: K.softmax(x[:, :, 4:]))(branch_3)
classification_tensor = merge([class_1, class_2, class_3], mode='concat',
concat_axis=1, name='classes')
localization_tensor = merge([local_1, local_2, local_3], mode='concat',
concat_axis=1, name='encoded_box')
output_tensor = merge([localization_tensor, classification_tensor],
mode='concat', concat_axis=-1, name='predictions')
model = Model(input_tensor, output_tensor)
return model
"""
if __name__ == '__main__':
model = mini_SSD()
model.summary()
from keras.utils.visualize_util import plot
plot(model, 'my_SSD.png')
"""
|
[
"arriaga.camargo@gmail.com"
] |
arriaga.camargo@gmail.com
|
dd3ee097cfafe78022793a50500d8412420c9b94
|
7c71776030428f86eb72d58580c263ade993cd70
|
/tests/Test_Memory_Shaper.py
|
06e3c8410e0affef7eb61bf68b04eae4483cfce3
|
[] |
no_license
|
p-christ/Action-Grammar-Reinforcement-Learning
|
0ae511039e0e86f3144644cf4b6c44249adbcb40
|
cddc88995b9f5717e81c72b94f5d03177b8c4468
|
refs/heads/master
| 2022-03-07T16:33:27.582929
| 2019-10-31T11:58:38
| 2019-10-31T11:58:38
| 218,323,156
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,710
|
py
|
from utilities.Memory_Shaper import Memory_Shaper
import numpy as np
buffer_size = 10
batch_size = 5
seed = 1
def new_reward_fn(cumulative_reward, length_of_macro_action):
"""Update reward to encourage usage of longer macro actions. The size of the improvement depends positively
on the length of the macro action"""
if cumulative_reward == 0.0: increment = 0.1
else: increment = abs(cumulative_reward)
total_change = increment * ((length_of_macro_action - 1)** 0.5) * 0.1
cumulative_reward += total_change
return cumulative_reward
def test_calculate_max_action_length():
"""Tests that calculate_max_action_length works correctly"""
memory_shaper = Memory_Shaper(buffer_size, batch_size, seed, new_reward_fn=new_reward_fn)
action_rules = {(0, 2, 33, 1, 22, 0, 0): 99, (0, 4): 2, (0, 9): 100}
assert memory_shaper.calculate_max_action_length(action_rules) == 7
action_rules = {(0, 2, 3): 99, (0, 4, 0, 0): 2, (0, 9): 100}
assert memory_shaper.calculate_max_action_length(action_rules) == 4
def test_add_adapted_experience_for_an_episode():
"""Tests that add_adapted_experience_for_an_episode works correctly"""
buffer_size = 3
memory_shaper = Memory_Shaper(buffer_size, buffer_size, seed,
new_reward_fn=new_reward_fn,
action_balanced_replay_buffer=False)
memory_shaper.reset()
states = [0, 1]
next_states = [1, 10]
rewards = [10, 5]
actions = [0, 5]
dones = [False, True]
memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
action_rules = {6:(0, 5), 1: (1,), 2:(2,), 3:(3,), 4:(4,), 5:(5,), 0:(0,)}
replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
assert len(replay_buffer) == 3
s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
assert all(s_states.numpy() == np.array([[0.0], [0.0], [1.0]]))
assert all(s_actions.numpy() == np.array([[0.0], [6.0], [5.0, ]]))
assert all(s_rewards.numpy() == np.array([[10.0], [new_reward_fn(15.0, 2)], [5.0, ]]))
assert all(s_next_states.numpy() == np.array([[1.0], [10.0], [10.0, ]]))
assert all(s_dones.numpy() == np.array([[0.0], [1.0], [1.0]]))
buffer_size = 5
memory_shaper = Memory_Shaper(buffer_size, buffer_size, seed, new_reward_fn=new_reward_fn,
action_balanced_replay_buffer=False)
memory_shaper.reset()
states = [0, 1, 2]
next_states = [1, 10, 11]
rewards = [10, 5, -4]
actions = [0, 5, 2]
dones = [False, False, True]
memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
action_rules = {6: (0, 5), 7: (0, 5, 2), 1: (1,), 2:(2,), 3:(3,), 4:(4,), 5:(5,), 0:(0,)}
replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
assert len(replay_buffer) == 5
s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
assert all(s_states.numpy() == np.array([[1.0], [0.0], [0.0], [2.0], [0.0]]))
assert all(s_actions.numpy() == np.array([[5.0],[0.0], [7.0], [2.0], [6.0]]))
assert np.allclose(s_rewards.numpy(), np.array([[5.0], [10.0], [np.round(new_reward_fn(11.0, 3), 5)], [-4.0], [new_reward_fn(15.0, 2)]]))
assert all(s_next_states.numpy() == np.array([[10.0], [1.0], [11.0], [11.0], [10.0]]))
assert all(s_dones.numpy() == np.array([[0.0], [0.0], [1.0], [1.0], [0.0]]))
def test_add_adapted_experience_for_an_episode_long_action_length():
"""Tests that add_adapted_experience_for_an_episode works correctly for actions with length > 2"""
buffer_size = 4
memory_shaper = Memory_Shaper(buffer_size, buffer_size, seed, new_reward_fn=new_reward_fn)
states = [0, 1, 2]
next_states = [1, 10, 11]
rewards = [10, 5, 2]
actions = [0, 1, 2]
dones = [False, False, False]
memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
action_rules = {3: (0, 1, 2), 0: (0,), 1: (1,), 2:(2, )}
replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
assert len(replay_buffer.memories[0]) == 1
assert len(replay_buffer.memories[1]) == 1
assert len(replay_buffer.memories[2]) == 1
assert len(replay_buffer.memories[3]) == 1
s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
assert all(s_states.numpy() == np.array([[0.0], [1.0,], [2.0], [0.0]]))
assert all(s_actions.numpy() == np.array([[0.0], [1.0, ], [2.0], [3.0]]))
assert np.allclose(s_rewards.numpy(), np.array([[10.0], [5.0], [2.0], [new_reward_fn(17.0, 3)]]))
assert all(s_next_states.numpy() == np.array([[1.0], [10.0, ], [11.0], [11.0]]))
assert all(s_dones.numpy() == np.array([[0.0], [0.0], [0.0], [0.0]]))
def test_add_adapted_experience_for_multiple_episodes():
"""Tests that add_adapted_experience_for_an_episode works correctly for multiple episodes"""
# for reward_increment in [0.0, 0.5, 1.5]:
buffer_size = 6
memory_shaper = Memory_Shaper(buffer_size, 6, seed, new_reward_fn)
states = [0]
next_states = [1]
rewards = [10]
actions = [0]
dones = [False]
memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
states = [1]
next_states = [2]
rewards = [11]
actions = [1]
dones = [True]
memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
states = [1, 2]
next_states = [2, 3]
rewards = [11, 2]
actions = [0, 1]
dones = [False, True]
memory_shaper.add_episode_experience(states, next_states, rewards, actions, dones)
action_rules = {0:(0,), 1:(1,), 2:(0, 1)}
replay_buffer = memory_shaper.put_adapted_experiences_in_a_replay_buffer(action_rules)
assert len(replay_buffer.memories[0]) == 2
assert len(replay_buffer.memories[1]) == 2
assert len(replay_buffer.memories[2]) == 1
s_states, s_actions, s_rewards, s_next_states, s_dones = replay_buffer.sample(separate_out_data_types=True)
assert all(s_states.numpy() == np.array([[0.0], [1.0], [2.0], [1.0], [1.0], [1.0]]))
assert all(s_actions.numpy() == np.array([[0.0], [0.0], [1.], [1.], [2.], [2.]]))
assert np.allclose(s_rewards.numpy(), np.array([[10.0], [11.0], [2.0], [11.0], [new_reward_fn(13.0, 2)], [new_reward_fn(13.0, 2)]]))
assert all(s_next_states.numpy() == np.array([[1.0], [2.0], [3.0], [2.0], [3.0], [3.0]]))
assert all(s_dones.numpy() == np.array([[0.0], [0.0], [1.0], [1.], [1.], [1.]]))
|
[
"p.christodoulou2@gmail.com"
] |
p.christodoulou2@gmail.com
|
07cb99e1a52cc6e587019a5e17a328e0cf94ac78
|
f5d17f536bd8617ac3d56c7d5aca4002444b481d
|
/requests3/toolbelt/auth/http_proxy_digest.py
|
38e9013f3884cd95f06fbe355609f4c4d98fa14e
|
[
"Apache-2.0"
] |
permissive
|
cclauss/requests3
|
a2df822642b4ecbbf9147b1df5e804e4b3d7ca35
|
260cd50aec1bd52b58968c3dfd0d5e850d563ac2
|
refs/heads/master
| 2020-05-15T18:47:10.559821
| 2019-04-20T17:30:15
| 2019-04-20T17:30:15
| 182,370,659
| 0
| 0
|
NOASSERTION
| 2019-04-20T06:41:56
| 2019-04-20T06:41:55
| null |
UTF-8
|
Python
| false
| false
| 3,668
|
py
|
# -*- coding: utf-8 -*-
"""The module containing HTTPProxyDigestAuth."""
import re
from requests import cookies, utils
from . import _digest_auth_compat as auth
class HTTPProxyDigestAuth(auth.HTTPDigestAuth):
"""HTTP digest authentication between proxy
:param stale_rejects: The number of rejects indicate that:
the client may wish to simply retry the request
with a new encrypted response, without reprompting the user for a
new username and password. i.e., retry build_digest_header
:type stale_rejects: int
"""
_pat = re.compile(r"digest ", flags=re.IGNORECASE)
def __init__(self, *args, **kwargs):
super(HTTPProxyDigestAuth, self).__init__(*args, **kwargs)
self.stale_rejects = 0
self.init_per_thread_state()
@property
def stale_rejects(self):
thread_local = getattr(self, "_thread_local", None)
if thread_local is None:
return self._stale_rejects
return thread_local.stale_rejects
@stale_rejects.setter
def stale_rejects(self, value):
thread_local = getattr(self, "_thread_local", None)
if thread_local is None:
self._stale_rejects = value
else:
thread_local.stale_rejects = value
def init_per_thread_state(self):
try:
super(HTTPProxyDigestAuth, self).init_per_thread_state()
except AttributeError:
# If we're not on requests 2.8.0+ this method does not exist
pass
def handle_407(self, r, **kwargs):
"""Handle HTTP 407 only once, otherwise give up
:param r: current response
:returns: responses, along with the new response
"""
if r.status_code == 407 and self.stale_rejects < 2:
s_auth = r.headers.get("proxy-authenticate")
if s_auth is None:
raise IOError(
"proxy server violated RFC 7235:"
"407 response MUST contain header proxy-authenticate"
)
elif not self._pat.match(s_auth):
return r
self.chal = utils.parse_dict_header(self._pat.sub("", s_auth, count=1))
# if we present the user/passwd and still get rejected
# https://tools.ietf.org/html/rfc2617#section-3.2.1
if "Proxy-Authorization" in r.request.headers and "stale" in self.chal:
if self.chal["stale"].lower() == "true": # try again
self.stale_rejects += 1
# wrong user/passwd
elif self.chal["stale"].lower() == "false":
raise IOError("User or password is invalid")
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.close()
prep = r.request.copy()
cookies.extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers["Proxy-Authorization"] = self.build_digest_header(
prep.method, prep.url
)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
else: # give up authenticate
return r
def __call__(self, r):
self.init_per_thread_state()
# if we have nonce, then just use it, otherwise server will tell us
if self.last_nonce:
r.headers["Proxy-Authorization"] = self.build_digest_header(r.method, r.url)
r.register_hook("response", self.handle_407)
return r
|
[
"me@kennethreitz.org"
] |
me@kennethreitz.org
|
da33ecd6cb5985519aebc9f35185190401aabb93
|
17032b7326510e360adbc61d8566c42ce8a0483a
|
/python2.7/site-packages/firewall/core/ipXtables.py
|
437808027155b96757b49e310a29f4bfd2428ca0
|
[] |
no_license
|
noslin005/wzz
|
d220f8ed84fe2bc8dca9b764ccc72ee807740fbc
|
3a4e9fd377076b3a21730af9fca82e4b2e61702b
|
refs/heads/master
| 2021-11-03T09:33:47.126484
| 2019-04-15T01:19:02
| 2019-04-15T01:19:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,988
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
from firewall.core.prog import runProg
from firewall.core.logger import log
from firewall.functions import tempFile, readfile, splitArgs
from firewall import config
import string
PROC_IPxTABLE_NAMES = {
"ipv4": "/proc/net/ip_tables_names",
"ipv6": "/proc/net/ip6_tables_names",
}
BUILT_IN_CHAINS = {
"security": [ "INPUT", "OUTPUT", "FORWARD" ],
"raw": [ "PREROUTING", "OUTPUT" ],
"mangle": [ "PREROUTING", "POSTROUTING", "INPUT", "OUTPUT", "FORWARD" ],
"nat": [ "PREROUTING", "POSTROUTING", "OUTPUT" ],
"filter": [ "INPUT", "OUTPUT", "FORWARD" ],
}
DEFAULT_REJECT_TYPE = {
"ipv4": "icmp-host-prohibited",
"ipv6": "icmp6-adm-prohibited",
}
ICMP = {
"ipv4": "icmp",
"ipv6": "ipv6-icmp",
}
DEFAULT_RULES = { }
LOG_RULES = { }
OUR_CHAINS = {} # chains created by firewalld
DEFAULT_RULES["security"] = [ ]
OUR_CHAINS["security"] = set()
for chain in BUILT_IN_CHAINS["security"]:
DEFAULT_RULES["security"].append("-N %s_direct" % chain)
DEFAULT_RULES["security"].append("-I %s 1 -j %s_direct" % (chain, chain))
OUR_CHAINS["security"].add("%s_direct" % chain)
DEFAULT_RULES["raw"] = [ ]
OUR_CHAINS["raw"] = set()
for chain in BUILT_IN_CHAINS["raw"]:
DEFAULT_RULES["raw"].append("-N %s_direct" % chain)
DEFAULT_RULES["raw"].append("-I %s 1 -j %s_direct" % (chain, chain))
OUR_CHAINS["raw"].add("%s_direct" % chain)
if chain == "PREROUTING":
DEFAULT_RULES["raw"].append("-N %s_ZONES_SOURCE" % chain)
DEFAULT_RULES["raw"].append("-N %s_ZONES" % chain)
DEFAULT_RULES["raw"].append("-I %s 2 -j %s_ZONES_SOURCE" % (chain, chain))
DEFAULT_RULES["raw"].append("-I %s 3 -j %s_ZONES" % (chain, chain))
OUR_CHAINS["raw"].update(set(["%s_ZONES_SOURCE" % chain, "%s_ZONES" % chain]))
DEFAULT_RULES["mangle"] = [ ]
OUR_CHAINS["mangle"] = set()
for chain in BUILT_IN_CHAINS["mangle"]:
DEFAULT_RULES["mangle"].append("-N %s_direct" % chain)
DEFAULT_RULES["mangle"].append("-I %s 1 -j %s_direct" % (chain, chain))
OUR_CHAINS["mangle"].add("%s_direct" % chain)
if chain == "PREROUTING":
DEFAULT_RULES["mangle"].append("-N %s_ZONES_SOURCE" % chain)
DEFAULT_RULES["mangle"].append("-N %s_ZONES" % chain)
DEFAULT_RULES["mangle"].append("-I %s 2 -j %s_ZONES_SOURCE" % (chain, chain))
DEFAULT_RULES["mangle"].append("-I %s 3 -j %s_ZONES" % (chain, chain))
OUR_CHAINS["mangle"].update(set(["%s_ZONES_SOURCE" % chain, "%s_ZONES" % chain]))
DEFAULT_RULES["nat"] = [ ]
OUR_CHAINS["nat"] = set()
for chain in BUILT_IN_CHAINS["nat"]:
DEFAULT_RULES["nat"].append("-N %s_direct" % chain)
DEFAULT_RULES["nat"].append("-I %s 1 -j %s_direct" % (chain, chain))
OUR_CHAINS["nat"].add("%s_direct" % chain)
if chain in [ "PREROUTING", "POSTROUTING" ]:
DEFAULT_RULES["nat"].append("-N %s_ZONES_SOURCE" % chain)
DEFAULT_RULES["nat"].append("-N %s_ZONES" % chain)
DEFAULT_RULES["nat"].append("-I %s 2 -j %s_ZONES_SOURCE" % (chain, chain))
DEFAULT_RULES["nat"].append("-I %s 3 -j %s_ZONES" % (chain, chain))
OUR_CHAINS["nat"].update(set(["%s_ZONES_SOURCE" % chain, "%s_ZONES" % chain]))
DEFAULT_RULES["filter"] = [
"-N INPUT_direct",
"-N INPUT_ZONES_SOURCE",
"-N INPUT_ZONES",
"-I INPUT 1 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
"-I INPUT 2 -i lo -j ACCEPT",
"-I INPUT 3 -j INPUT_direct",
"-I INPUT 4 -j INPUT_ZONES_SOURCE",
"-I INPUT 5 -j INPUT_ZONES",
"-I INPUT 6 -m conntrack --ctstate INVALID -j DROP",
"-I INPUT 7 -j %%REJECT%%",
"-N FORWARD_direct",
"-N FORWARD_IN_ZONES_SOURCE",
"-N FORWARD_IN_ZONES",
"-N FORWARD_OUT_ZONES_SOURCE",
"-N FORWARD_OUT_ZONES",
"-I FORWARD 1 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
"-I FORWARD 2 -i lo -j ACCEPT",
"-I FORWARD 3 -j FORWARD_direct",
"-I FORWARD 4 -j FORWARD_IN_ZONES_SOURCE",
"-I FORWARD 5 -j FORWARD_IN_ZONES",
"-I FORWARD 6 -j FORWARD_OUT_ZONES_SOURCE",
"-I FORWARD 7 -j FORWARD_OUT_ZONES",
"-I FORWARD 8 -m conntrack --ctstate INVALID -j DROP",
"-I FORWARD 9 -j %%REJECT%%",
"-N OUTPUT_direct",
"-I OUTPUT 1 -j OUTPUT_direct",
]
LOG_RULES["filter"] = [
"-I INPUT 6 -m conntrack --ctstate INVALID %%LOGTYPE%% -j LOG --log-prefix 'STATE_INVALID_DROP: '",
"-I INPUT 8 %%LOGTYPE%% -j LOG --log-prefix 'FINAL_REJECT: '",
"-I FORWARD 8 -m conntrack --ctstate INVALID %%LOGTYPE%% -j LOG --log-prefix 'STATE_INVALID_DROP: '",
"-I FORWARD 10 %%LOGTYPE%% -j LOG --log-prefix 'FINAL_REJECT: '",
]
OUR_CHAINS["filter"] = set(["INPUT_direct", "INPUT_ZONES_SOURCE", "INPUT_ZONES",
"FORWARD_direct", "FORWARD_IN_ZONES_SOURCE",
"FORWARD_IN_ZONES", "FORWARD_OUT_ZONES_SOURCE",
"FORWARD_OUT_ZONES", "OUTPUT_direct"])
class ip4tables(object):
ipv = "ipv4"
def __init__(self):
self._command = config.COMMANDS[self.ipv]
self._restore_command = config.COMMANDS["%s-restore" % self.ipv]
self.wait_option = self._detect_wait_option()
self.restore_wait_option = self._detect_restore_wait_option()
self.fill_exists()
def fill_exists(self):
self.command_exists = os.path.exists(self._command)
self.restore_command_exists = os.path.exists(self._restore_command)
def __run(self, args):
# convert to string list
if self.wait_option and self.wait_option not in args:
_args = [self.wait_option] + ["%s" % item for item in args]
else:
_args = ["%s" % item for item in args]
log.debug2("%s: %s %s", self.__class__, self._command, " ".join(_args))
(status, ret) = runProg(self._command, _args)
if status != 0:
raise ValueError("'%s %s' failed: %s" % (self._command,
" ".join(_args), ret))
return ret
def split_value(self, rules, opts=None):
"""Split values combined with commas for options in opts"""
if opts is None:
return rules
out_rules = [ ]
for rule in rules:
processed = False
for opt in opts:
try:
i = rule.index(opt)
except ValueError:
pass
else:
if len(rule) > i and "," in rule[i+1]:
# For all items in the comma separated list in index
# i of the rule, a new rule is created with a single
# item from this list
processed = True
items = rule[i+1].split(",")
for item in items:
_rule = rule[:]
_rule[i+1] = item
out_rules.append(_rule)
if not processed:
out_rules.append(rule)
return out_rules
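    # Illustrative sketch (not part of the original file): split_value() expands one rule
    # whose "-s"/"-d" value is a comma separated list into one rule per value, e.g.
    #   self.split_value([["-A", "INPUT_ZONES", "-s", "10.0.0.1,10.0.0.2", "-j", "ACCEPT"]],
    #                    ["-s", "--source"])
    #   # -> [["-A", "INPUT_ZONES", "-s", "10.0.0.1", "-j", "ACCEPT"],
    #   #     ["-A", "INPUT_ZONES", "-s", "10.0.0.2", "-j", "ACCEPT"]]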
def _rule_replace(self, rule, pattern, replacement):
try:
i = rule.index(pattern)
except ValueError:
return False
else:
rule[i:i+1] = replacement
return True
def set_rules(self, rules, flush=False, log_denied="off"):
temp_file = tempFile()
table_rules = { }
for _rule in rules:
rule = _rule[:]
# replace %%REJECT%%
self._rule_replace(rule, "%%REJECT%%", \
["REJECT", "--reject-with", DEFAULT_REJECT_TYPE[self.ipv]])
# replace %%ICMP%%
self._rule_replace(rule, "%%ICMP%%", [ICMP[self.ipv]])
# replace %%LOGTYPE%%
try:
i = rule.index("%%LOGTYPE%%")
except ValueError:
pass
else:
if log_denied == "off":
continue
if log_denied in [ "unicast", "broadcast", "multicast" ]:
rule[i:i+1] = [ "-m", "pkttype", "--pkt-type", log_denied ]
else:
rule.pop(i)
table = "filter"
            # get table from the rule
for opt in [ "-t", "--table" ]:
try:
i = rule.index(opt)
except ValueError:
pass
else:
if len(rule) >= i+1:
rule.pop(i)
table = rule.pop(i)
# we can not use joinArgs here, because it would use "'" instead
# of '"' for the start and end of the string, this breaks
# iptables-restore
for i in range(len(rule)):
for c in string.whitespace:
if c in rule[i] and not (rule[i].startswith('"') and
rule[i].endswith('"')):
rule[i] = '"%s"' % rule[i]
table_rules.setdefault(table, []).append(rule)
for table in table_rules:
rules = table_rules[table]
rules = self.split_value(rules, [ "-s", "--source" ])
rules = self.split_value(rules, [ "-d", "--destination" ])
temp_file.write("*%s\n" % table)
for rule in rules:
temp_file.write(" ".join(rule) + "\n")
temp_file.write("COMMIT\n")
temp_file.close()
stat = os.stat(temp_file.name)
log.debug2("%s: %s %s", self.__class__, self._restore_command,
"%s: %d" % (temp_file.name, stat.st_size))
args = [ ]
if self.restore_wait_option:
args.append(self.restore_wait_option)
if not flush:
args.append("-n")
(status, ret) = runProg(self._restore_command, args,
stdin=temp_file.name)
if log.getDebugLogLevel() > 2:
lines = readfile(temp_file.name)
if lines is not None:
i = 1
for line in lines:
log.debug3("%8d: %s" % (i, line), nofmt=1, nl=0)
if not line.endswith("\n"):
log.debug3("", nofmt=1)
i += 1
os.unlink(temp_file.name)
if status != 0:
raise ValueError("'%s %s' failed: %s" % (self._restore_command,
" ".join(args), ret))
return ret
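    # Illustrative sketch (not part of the original file): for a call like
    #   self.set_rules([["-t", "filter", "-A", "INPUT_direct",
    #                    "-p", "tcp", "--dport", "22", "-j", "ACCEPT"]])
    # the temporary file handed to iptables-restore would contain roughly
    #   *filter
    #   -A INPUT_direct -p tcp --dport 22 -j ACCEPT
    #   COMMIT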
def set_rule(self, rule, log_denied="off"):
# replace %%REJECT%%
self._rule_replace(rule, "%%REJECT%%", \
["REJECT", "--reject-with", DEFAULT_REJECT_TYPE[self.ipv]])
# replace %%ICMP%%
self._rule_replace(rule, "%%ICMP%%", [ICMP[self.ipv]])
# replace %%LOGTYPE%%
try:
i = rule.index("%%LOGTYPE%%")
except ValueError:
pass
else:
if log_denied == "off":
return ""
if log_denied in [ "unicast", "broadcast", "multicast" ]:
                rule[i:i+1] = [ "-m", "pkttype", "--pkt-type", log_denied ]
else:
rule.pop(i)
return self.__run(rule)
def append_rule(self, rule):
self.__run([ "-A" ] + rule)
def delete_rule(self, rule):
self.__run([ "-D" ] + rule)
def available_tables(self, table=None):
ret = []
tables = [ table ] if table else BUILT_IN_CHAINS.keys()
for table in tables:
try:
self.__run(["-t", table, "-L", "-n"])
ret.append(table)
except ValueError:
log.debug1("%s table '%s' does not exist (or not enough permission to check)." % (self.ipv, table))
return ret
def used_tables(self):
tables = [ ]
filename = PROC_IPxTABLE_NAMES[self.ipv]
if os.path.exists(filename):
with open(filename, "r") as f:
for line in f.readlines():
if not line:
break
tables.append(line.strip())
return tables
def _detect_wait_option(self):
wait_option = ""
ret = runProg(self._command, ["-w", "-L", "-n"]) # since iptables-1.4.20
if ret[0] == 0:
wait_option = "-w" # wait for xtables lock
ret = runProg(self._command, ["-w2", "-L", "-n"]) # since iptables > 1.4.21
if ret[0] == 0:
wait_option = "-w2" # wait max 2 seconds
log.debug2("%s: %s will be using %s option.", self.__class__, self._command, wait_option)
return wait_option
def _detect_restore_wait_option(self):
temp_file = tempFile()
temp_file.write("#foo")
temp_file.close()
wait_option = ""
for test_option in ["-w", "--wait=2"]:
ret = runProg(self._restore_command, [test_option], stdin=temp_file.name)
if ret[0] == 0 and "invalid option" not in ret[1] \
and "unrecognized option" not in ret[1]:
wait_option = test_option
break
log.debug2("%s: %s will be using %s option.", self.__class__, self._restore_command, wait_option)
os.unlink(temp_file.name)
return wait_option
def flush(self, transaction=None):
tables = self.used_tables()
for table in tables:
# Flush firewall rules: -F
# Delete firewall chains: -X
# Set counter to zero: -Z
for flag in [ "-F", "-X", "-Z" ]:
if transaction is not None:
transaction.add_rule(self.ipv, [ "-t", table, flag ])
else:
self.__run([ "-t", table, flag ])
def set_policy(self, policy, which="used", transaction=None):
if which == "used":
tables = self.used_tables()
else:
tables = list(BUILT_IN_CHAINS.keys())
for table in tables:
if table == "nat":
continue
for chain in BUILT_IN_CHAINS[table]:
if transaction is not None:
transaction.add_rule(self.ipv,
[ "-t", table, "-P", chain, policy ])
else:
self.__run([ "-t", table, "-P", chain, policy ])
def supported_icmp_types(self):
"""Return ICMP types that are supported by the iptables/ip6tables command and kernel"""
ret = [ ]
output = ""
try:
output = self.__run(["-p",
"icmp" if self.ipv == "ipv4" else "ipv6-icmp",
"--help"])
except ValueError as ex:
if self.ipv == "ipv4":
log.debug1("iptables error: %s" % ex)
else:
log.debug1("ip6tables error: %s" % ex)
lines = output.splitlines()
in_types = False
for line in lines:
#print(line)
if in_types:
line = line.strip().lower()
splits = line.split()
for split in splits:
if split.startswith("(") and split.endswith(")"):
x = split[1:-1]
else:
x = split
if x not in ret:
ret.append(x)
if self.ipv == "ipv4" and line.startswith("Valid ICMP Types:") or \
self.ipv == "ipv6" and line.startswith("Valid ICMPv6 Types:"):
in_types = True
return ret
def apply_default_rules(self, transaction, log_denied="off"):
for table in DEFAULT_RULES:
if table not in self.available_tables():
continue
default_rules = DEFAULT_RULES[table][:]
if log_denied != "off" and table in LOG_RULES:
default_rules.extend(LOG_RULES[table])
prefix = [ "-t", table ]
for rule in default_rules:
if type(rule) == list:
_rule = prefix + rule
else:
_rule = prefix + splitArgs(rule)
transaction.add_rule(self.ipv, _rule)
class ip6tables(ip4tables):
ipv = "ipv6"
def apply_rpfilter_rules(self, transaction, log_denied=False):
transaction.add_rule(self.ipv,
[ "-I", "PREROUTING", "1", "-t", "raw",
"-p", "ipv6-icmp",
"--icmpv6-type=router-advertisement",
"-j", "ACCEPT" ]) # RHBZ#1058505
transaction.add_rule(self.ipv,
[ "-I", "PREROUTING", "2", "-t", "raw",
"-m", "rpfilter", "--invert", "-j", "DROP" ])
if log_denied != "off":
transaction.add_rule(self.ipv,
[ "-I", "PREROUTING", "2", "-t", "raw",
"-m", "rpfilter", "--invert",
"-j", "LOG",
"--log-prefix", "rpfilter_DROP: " ])
# ipv ebtables also uses this
#
def reverse_rule(self, args):
""" Inverse valid rule """
replace_args = {
# Append
"-A": "-D",
"--append": "--delete",
# Insert
"-I": "-D",
"--insert": "--delete",
# New chain
"-N": "-X",
"--new-chain": "--delete-chain",
}
ret_args = args[:]
for arg in replace_args:
try:
idx = ret_args.index(arg)
except Exception:
continue
if arg in [ "-I", "--insert" ]:
                # If the insert carries a rule number, drop it (it is not part of the delete)
# Opt at position idx, chain at position idx+1, [rulenum] at
# position idx+2
try:
int(ret_args[idx+2])
except Exception:
pass
else:
ret_args.pop(idx+2)
ret_args[idx] = replace_args[arg]
return ret_args
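    # Illustrative sketch (not part of the original file): reverse_rule() maps an add-type
    # rule onto the rule that removes it again, e.g.
    #   self.reverse_rule(["-I", "INPUT_ZONES", "1", "-t", "filter", "-j", "ACCEPT"])
    #   # -> ["-D", "INPUT_ZONES", "-t", "filter", "-j", "ACCEPT"]
    # (the numeric position after "-I" is dropped and "-I" becomes "-D"); likewise a chain
    # creation "-N foo_chain" becomes the chain deletion "-X foo_chain".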
|
[
"wuzhengzhong@ruiec.cn"
] |
wuzhengzhong@ruiec.cn
|
c7050f5f25178f18d977879c234baea3c726f0ca
|
d88397be1c6a31985bc2283280e743fd3b988dd1
|
/nncf/structures.py
|
b29eca4968e93b8bf41744073ad75940d2d2a247
|
[
"Apache-2.0"
] |
permissive
|
sshyran/openvino-nncf-pytorch
|
f5e09066a216fa786927937a91a0e6742f347660
|
fd02652950cd803a36f5283f5a5df999bb45433b
|
refs/heads/develop
| 2023-04-18T06:58:54.646669
| 2021-03-12T15:41:39
| 2021-03-12T15:41:39
| 347,374,166
| 0
| 0
|
Apache-2.0
| 2023-04-03T23:52:21
| 2021-03-13T13:11:32
| null |
UTF-8
|
Python
| false
| false
| 6,486
|
py
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Callable, Any
import torch
from torch.nn.modules.loss import _Loss
from torch.utils.data import DataLoader
from nncf.config.structure import NNCFExtraConfigStruct
class QuantizationPrecisionInitArgs(NNCFExtraConfigStruct):
"""
Stores arguments for initialization of quantization's bitwidth.
Initialization is based on calculating a measure reflecting layers' sensitivity to perturbations. The measure is
calculated by estimation of average trace of Hessian for modules using the Hutchinson algorithm.
:param criterion_fn: callable object, that implements calculation of loss by given outputs of the model, targets,
and loss function. It's not needed when the calculation of loss is just a direct call of the criterion with 2
arguments: outputs of model and targets. For all other specific cases, the callable object should be provided.
E.g. for inception-v3, the losses for two outputs of the model are combined with different weight.
:param criterion: loss function, instance of descendant of `torch.nn.modules.loss._Loss`,
:param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
*WARNING*: The final quantizer setup of the created compressed model is dependent on the data
provided by the data_loader. When using PyTorch's DistributedDataParallel with precision
initialization, make sure that each process in the distributed group receives the same data
from the data_loader as the other processes, otherwise the create_compressed_model call may
create different compressed model objects for each distributed process and the distributed training
will fail.
:param device: Device to perform initialization at. Either 'cpu', 'cuda', or None (default); if None, will
use the device of the model's parameters.
"""
def __init__(self, criterion_fn: Callable[[Any, Any, _Loss], torch.Tensor], criterion: _Loss,
data_loader: DataLoader, device: str = None):
self.criterion_fn = criterion_fn
self.criterion = criterion
self.data_loader = data_loader
self.device = device
@classmethod
def get_id(cls) -> str:
return "quantization_precision_init_args"
class QuantizationRangeInitArgs(NNCFExtraConfigStruct):
"""
Stores arguments for initialization of quantization's ranges.
Initialization is done by collecting per-layer activation statistics on training dataset in order to choose proper
output range for quantization.
:param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
:param device: Device to perform initialization at. Either 'cpu', 'cuda', or None (default); if None, will
use the device of the model's parameters.
"""
def __init__(self, data_loader: DataLoader, device: str = None):
self.data_loader = data_loader
self.device = device
@classmethod
def get_id(cls) -> str:
return "quantization_range_init_args"
class BNAdaptationInitArgs(NNCFExtraConfigStruct):
"""
Stores arguments for BatchNorm statistics adaptation procedure.
Adaptation is done by inferring a number of data batches on a compressed model
while the BN layers are updating the rolling_mean and rolling_variance stats.
:param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
:param device: Device to perform initialization at. Either 'cpu', 'cuda', or None (default); if None, will
use the device of the model's parameters.
"""
def __init__(self, data_loader: DataLoader, device: str = None):
self.data_loader = data_loader
self.device = device
@classmethod
def get_id(cls) -> str:
return "bn_adaptation_init_args"
class AutoQPrecisionInitArgs(NNCFExtraConfigStruct):
"""
:param data_loader: 'data_loader' - provides an iterable over the given dataset. Instance of
nncf.initialization.InitializingDataLoader; a regular 'torch.utils.data.DataLoader' may
also be passed, but only in the simple case when it returns a tuple of (input, target) tensors.
*WARNING*: The final quantizer setup of the created compressed model is dependent on the data
provided by the data_loader. When using PyTorch's DistributedDataParallel with precision
initialization, make sure that each process in the distributed group receives the same data
from the data_loader as the other processes, otherwise the create_compressed_model call may
create different compressed model objects for each distributed process and the distributed training
will fail.
"""
def __init__(self, data_loader: DataLoader,
eval_fn: Callable[[torch.nn.Module, torch.utils.data.DataLoader], float],
nncf_config: 'NNCFConfig'):
self.data_loader = data_loader
self.eval_fn = eval_fn
self.config = nncf_config
@classmethod
def get_id(cls) -> str:
return "autoq_precision_init_args"
|
[
"noreply@github.com"
] |
sshyran.noreply@github.com
|
a3de73991f302e4984380b4f57432e7d9167b0a3
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/mDzheHpwtqyXePEBE_12.py
|
5d5a6c21e0b1f47994b25b44b57f1c91dbd275ce
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 53
|
py
|
def sum_polygon(n):
    return (n - 2) * 180
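# Illustrative sketch (not part of the original file): sum_polygon(n) returns the sum of
# the interior angles of an n-sided polygon, (n - 2) * 180 degrees, e.g.
#   sum_polygon(3)   # 180 (triangle)
#   sum_polygon(4)   # 360 (quadrilateral)
#   sum_polygon(6)   # 720 (hexagon)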
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
862b20b09be678debb5763ce7aed391cdd305028
|
375f29655b966e7dbac2297b3f79aadb5d03b737
|
/Image/test6.py
|
e0c2f30ebc95da86684e30c0aa49fd81708a4285
|
[
"MIT"
] |
permissive
|
pection-zz/FindJointwithImageprocessing
|
33e0b47ca3629d85e739edcd88dcd1663af88631
|
3dd4563be88dfcf005c32f19ae97d03f9bf715ad
|
refs/heads/master
| 2022-12-23T11:09:04.391591
| 2020-10-05T16:35:21
| 2020-10-05T16:35:21
| 301,473,183
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
import numpy as np
import cv2
# mouse callback function
def draw_circle(event,x,y,flags,param):
global human,tiger,logo
positionX = tiger.shape[1]
positionY = tiger.shape[0]
col = human.shape[1]
row = human.shape[0]
    if event == cv2.EVENT_LBUTTONDOWN: # left mouse click
        y -= row // 2 # shift so the click becomes the center of the overlay
        x -= col // 2 # (integer division keeps the indices usable for slicing)
        if y + row > positionY: # clamp the overlay inside the tiger image
            y = positionY - row
        elif y < 0:
            y = 0
        if x + col > positionX:
            x = positionX - col
        elif x < 0:
            x = 0
# print (x,y) # position x,y
# print (positionX,positionY)
        logo = tiger[y:y + row, x:x + col] # region of the tiger picture before the human picture is blended in
        k = cv2.waitKey(1000) & 0xFF # wait up to 1 s for the key that selects the blend operation
        if k == ord('1'): # arithmetic / bitwise blend operations
logo = np.add(logo,human[0:row,0:col])
if k == ord('2'):
logo = np.subtract(logo,human[0:row,0:col])
if k == ord('3'):
logo = np.multiply(logo,human[0:row,0:col])
if k == ord('4'):
logo = np.divide(logo,human[0:row,0:col])
if k == ord('5'):
logo = np.bitwise_and(logo,human[0:row,0:col])
if k == ord('6'):
logo = np.bitwise_or(logo,human[0:row,0:col])
if k == ord('7'):
            logo = np.bitwise_xor(logo,human[0:row,0:col])
        tiger[y:y+row, x:x+col] = logo # write the blended region back into the tiger picture
# Load the two images, create the window once and bind the mouse callback to it
tiger = cv2.imread('C:\\Code_python\\Image\\Picture\\Tiger.jpg')
human = cv2.imread('C:\\Code_python\\Image\\Picture\\Human.jpg')
cv2.namedWindow('image')
cv2.setMouseCallback('image', draw_circle)
while(1):
    cv2.imshow('image',tiger)
#cv2.imshow('eiei',img2)
k = cv2.waitKey(20) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
|
[
"pection.naphat@gmail.com"
] |
pection.naphat@gmail.com
|
6f194f8fee5688dc5d7ab4e2da990b9aaf9ad2a8
|
ecb286df5937cd30855335f3e9eadd3edbddbd02
|
/CARSELL.py
|
2c5425df7c655dd40a48d79202114e60d1bfbfba
|
[] |
no_license
|
prashant97sikarwar/codechef-april-long-challenge-2020
|
fbbddadb3398a285fe735f3c0049f74371b79100
|
5e459541044b54e64fd63b072ff5bf4870dea126
|
refs/heads/master
| 2022-04-14T21:06:41.237608
| 2020-04-13T19:48:29
| 2020-04-13T19:48:29
| 254,818,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
t = int(input())                          # number of test cases
while t > 0:
    n = int(input())                      # number of cars
    arr = list(map(int,input().split()))  # base prices
    arr.sort()
    arr = arr[::-1]                       # sell the most expensive cars first
    total = 0
    for i in range(n):
        fg = arr[i] - i                   # price after i days of depreciation
        if fg > 0:
            total += fg
        else:
            break                         # every remaining car would sell for <= 0
    ans = total % 1000000007
    print(ans)
    t -= 1
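# Illustrative note (not part of the original file): the loop above sells the i-th most
# expensive car on day i, after its price has dropped by i. For prices [6, 6, 5] the
# amounts received are 6-0, 6-1 and 5-2, giving 6 + 5 + 3 = 14.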
|
[
"prashant97sikarwar@gmail.com"
] |
prashant97sikarwar@gmail.com
|
3dfbf8c94d2352552fc10b1451ec343edf118d69
|
686d2e525b7cd7a792501309f251dbf6dcea7ef4
|
/剑指offer/14.2剪绳子-贪心法.py
|
cd7abf147180fc0496002f548c7b684b08bfbf0b
|
[] |
no_license
|
freemanwang/Algorithm
|
fa23c9c33c43f942e72d9d1828a95417e7c99575
|
bb691c1afb460a382d7aaaa308e8b4e17f5bf4c5
|
refs/heads/master
| 2020-06-29T19:37:32.584724
| 2020-02-07T06:36:29
| 2020-02-07T06:36:29
| 200,605,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 520
|
py
|
def maxCuttingSolution(length:int):
if length < 2:
return 0
if length == 2:
return 1
if length == 3:
return 2
    # Cut as many length-3 segments as possible
    timeOf3 = length // 3
    # When the leftover would be 1 (i.e. remaining length 4), cut 2*2 instead of 3*1
    if length - timeOf3*3 == 1:
        timeOf3 -= 1
    timeOf2 = (length - timeOf3*3) // 2
    print('segments of length 3:', timeOf3, '; segments of length 2:', timeOf2)
return pow(3,timeOf3) * pow(2,timeOf2)
result = maxCuttingSolution(7)
print(result)
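# Illustrative sketch (not part of the original file): a small brute-force check of the
# greedy rule above. _best_product() computes the best product over all ways of cutting
# a rope of length n into at least two integer pieces, and should agree with
# maxCuttingSolution() for small n.
def _best_product(n):
    best = 0
    for first in range(1, n):
        for cand in (first * (n - first), first * _best_product(n - first)):
            if cand > best:
                best = cand
    return best
for n in range(2, 15):
    assert maxCuttingSolution(n) == _best_product(n), n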
|
[
"121689123@qq.com"
] |
121689123@qq.com
|
56fa05c2c2209de10689e7ac05abd0c4819683e8
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_12_01/aio/operations_async/_public_ip_prefixes_operations_async.py
|
f793063e7b65111a6d47cd949a6ce5c76ff9a131
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 27,622
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations:
"""PublicIPPrefixesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
public_ip_prefix_name: str,
**kwargs
) -> None:
"""Deletes the specified public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIpPrefix.
:type public_ip_prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: None, or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def get(
self,
resource_group_name: str,
public_ip_prefix_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.PublicIPPrefix":
"""Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_12_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "models.PublicIPPrefix",
**kwargs
) -> "models.PublicIPPrefix":
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "models.PublicIPPrefix",
**kwargs
) -> "models.PublicIPPrefix":
"""Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public IP prefix operation.
:type parameters: ~azure.mgmt.network.v2018_12_01.models.PublicIPPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_12_01.models.PublicIPPrefix
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.PublicIPPrefix":
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.PublicIPPrefix":
"""Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to update public IP prefix tags.
:type parameters: ~azure.mgmt.network.v2018_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_12_01.models.PublicIPPrefix
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.PublicIPPrefixListResult"]:
"""Gets all the public IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefixListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.PublicIPPrefixListResult"]:
"""Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_12_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PublicIPPrefixListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-12-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
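    # Illustrative sketch (not part of the original file): with an asynchronous management
    # client instance (called `client` here as a placeholder), the long-running and paging
    # operations above are typically consumed like:
    #
    #   poller = await client.public_ip_prefixes.begin_delete("my-rg", "my-prefix")
    #   await poller.result()                      # block until the LRO completes
    #   async for prefix in client.public_ip_prefixes.list("my-rg"):
    #       print(prefix.name)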
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
cf5073e4c44b95893676e26aa7ff6ef52cd01700
|
ca5d7fc720217f7b073c567d389a211725ecf401
|
/apps/storybase_story/migrations/0017_call_to_action_to_translation.py
|
46c03d92ff8e8eecbc46ef8b28774debc414494b
|
[
"MIT"
] |
permissive
|
denverfoundation/storybase
|
49b533b5572b6e70cdd036574ea4c84c765cd4b3
|
15e429df850b68ee107a9b8206adc44fe1174370
|
refs/heads/develop
| 2020-04-04T07:12:52.203147
| 2015-08-07T05:22:13
| 2015-08-07T05:22:13
| 3,374,036
| 3
| 5
| null | 2015-11-18T14:19:11
| 2012-02-07T03:35:38
|
Python
|
UTF-8
|
Python
| false
| false
| 17,374
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.conf import settings
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for story in orm.Story.objects.all():
call_to_action = story.call_to_action
if call_to_action:
translations = orm.StoryTranslation.objects.filter(story=story)
if translations:
try:
translation = translations.get(
language=settings.LANGUAGE_CODE)
except orm.StoryTranslation.DoesNotExist:
translation = translations[0]
translation.call_to_action = call_to_action
translation.save()
def backwards(self, orm):
for story in orm.Story.objects.all():
            # Copy call_to_action from the translation back onto the story
translations = orm.StoryTranslation.objects.filter(story=story)
if translations:
try:
translation = translations.get(
language=settings.LANGUAGE_CODE)
except orm.StoryTranslation.DoesNotExist:
translation = translations[0]
if translation.call_to_action:
story.call_to_action = translation.call_to_action
story.save()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_story.section': {
'Meta': {'object_name': 'Section'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sections'", 'blank': 'True', 'through': "orm['storybase_story.SectionAsset']", 'to': "orm['storybase_asset.Asset']"}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_story.Section']", 'null': 'True', 'through': "orm['storybase_story.SectionRelation']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'root': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'section_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionasset': {
'Meta': {'object_name': 'SectionAsset'},
'asset': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectionrelation': {
'Meta': {'object_name': 'SectionRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_parent'", 'to': "orm['storybase_story.Section']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'section_child'", 'to': "orm['storybase_story.Section']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_story.sectiontranslation': {
'Meta': {'unique_together': "(('section', 'language'),)", 'object_name': 'SectionTranslation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Section']"}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'call_to_action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC BY-NC-SA'", 'max_length': '25'}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'storybase_story.storytranslation': {
'Meta': {'unique_together': "(('story', 'language'),)", 'object_name': 'StoryTranslation'},
'call_to_action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'title': ('storybase.fields.ShortTextField', [], {}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['storybase_story']
|
[
"geoffhing@gmail.com"
] |
geoffhing@gmail.com
|
9db1a5211567b08ec0bd272c70b33e7b4b2417b3
|
fb82fdf706863465b1f357cd1fa0447474cd8a70
|
/ServerComponent/venv/Lib/site-packages/rsrc/contrib/db/sqla/serializer.py
|
86d9f430b9df9c7e03c90d865f5bb3be82ec6e8c
|
[
"MIT"
] |
permissive
|
CDU55/FakeNews
|
d79e2a069b3f1392f779d5b2256cd54c696e789a
|
707bd48dd78851081d98ad21bbdadfc2720bd644
|
refs/heads/main
| 2023-02-20T06:27:18.618837
| 2021-01-17T15:14:27
| 2021-01-17T15:14:27
| 305,167,221
| 0
| 1
|
MIT
| 2020-12-07T19:51:46
| 2020-10-18T18:16:49
|
Python
|
UTF-8
|
Python
| false
| false
| 436
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from rsrc import settings
from jsonsir import Serializer
from jsonsir.contrib.intencoder import IntEncoder
from jsonsir.contrib.boolencoder import BoolEncoder
from jsonsir.contrib.datetimeencoder import DateTimeEncoder
# instantiate `Serializer` (bound with specified encoders)
serializer = Serializer([
IntEncoder(),
BoolEncoder(),
DateTimeEncoder(settings.DATE_FORMAT),
])
|
[
"48147775+BiancaChirica@users.noreply.github.com"
] |
48147775+BiancaChirica@users.noreply.github.com
|
7a99160a9b41a3afe3b729d916ebd377593d9fa2
|
bc233c24523f05708dd1e091dca817f9095e6bb5
|
/bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/representations/sprite/sprite_api.py
|
c0609f57f38b0754440948820163b44f8bde915a
|
[
"MIT"
] |
permissive
|
bitmovin/bitmovin-api-sdk-python
|
e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd
|
b0860c0b1be7747cf22ad060985504da625255eb
|
refs/heads/main
| 2023-09-01T15:41:03.628720
| 2023-08-30T10:52:13
| 2023-08-30T10:52:13
| 175,209,828
| 13
| 14
|
MIT
| 2021-04-29T12:30:31
| 2019-03-12T12:47:18
|
Python
|
UTF-8
|
Python
| false
| false
| 5,921
|
py
|
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.sprite_representation import SpriteRepresentation
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.representations.sprite.sprite_representation_list_query_params import SpriteRepresentationListQueryParams
class SpriteApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(SpriteApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, manifest_id, period_id, adaptationset_id, sprite_representation, **kwargs):
# type: (string_types, string_types, string_types, SpriteRepresentation, dict) -> SpriteRepresentation
"""Add Sprite Representation
:param manifest_id: Id of the manifest
:type manifest_id: string_types, required
:param period_id: Id of the period
:type period_id: string_types, required
:param adaptationset_id: Id of the image adaptation set
:type adaptationset_id: string_types, required
:param sprite_representation: The Sprite representation to be added to the adaptation set. Note that the adaptation set has to be an image adaptation set. Only supported for sprites generated with encoder version `2.76.0` or above.
:type sprite_representation: SpriteRepresentation, required
:return: Sprite representation
:rtype: SpriteRepresentation
"""
return self.api_client.post(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite',
sprite_representation,
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id},
type=SpriteRepresentation,
**kwargs
)
def delete(self, manifest_id, period_id, adaptationset_id, representation_id, **kwargs):
# type: (string_types, string_types, string_types, string_types, dict) -> BitmovinResponse
"""Delete Sprite Representation
:param manifest_id: Id of the manifest
:type manifest_id: string_types, required
:param period_id: Id of the period
:type period_id: string_types, required
:param adaptationset_id: Id of the adaptation set
:type adaptationset_id: string_types, required
:param representation_id: Id of the Sprite representation to be deleted
:type representation_id: string_types, required
:return: Id of the Sprite Representation
:rtype: BitmovinResponse
"""
return self.api_client.delete(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite/{representation_id}',
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id, 'representation_id': representation_id},
type=BitmovinResponse,
**kwargs
)
def get(self, manifest_id, period_id, adaptationset_id, representation_id, **kwargs):
# type: (string_types, string_types, string_types, string_types, dict) -> SpriteRepresentation
"""Sprite Representation Details
:param manifest_id: Id of the manifest
:type manifest_id: string_types, required
:param period_id: Id of the period
:type period_id: string_types, required
:param adaptationset_id: Id of the adaptation set
:type adaptationset_id: string_types, required
:param representation_id: Id of the Sprite representation
:type representation_id: string_types, required
:return: Sprite Representation details
:rtype: SpriteRepresentation
"""
return self.api_client.get(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite/{representation_id}',
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id, 'representation_id': representation_id},
type=SpriteRepresentation,
**kwargs
)
def list(self, manifest_id, period_id, adaptationset_id, query_params=None, **kwargs):
# type: (string_types, string_types, string_types, SpriteRepresentationListQueryParams, dict) -> SpriteRepresentation
"""List all Sprite Representations
:param manifest_id: Id of the manifest
:type manifest_id: string_types, required
:param period_id: Id of the period
:type period_id: string_types, required
:param adaptationset_id: Id of the adaptation set
:type adaptationset_id: string_types, required
:param query_params: Query parameters
:type query_params: SpriteRepresentationListQueryParams
:return: List of Sprite Representations
:rtype: SpriteRepresentation
"""
return self.api_client.get(
'/encoding/manifests/dash/{manifest_id}/periods/{period_id}/adaptationsets/{adaptationset_id}/representations/sprite',
path_params={'manifest_id': manifest_id, 'period_id': period_id, 'adaptationset_id': adaptationset_id},
query_params=query_params,
pagination_response=True,
type=SpriteRepresentation,
**kwargs
)
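# A minimal usage sketch (hypothetical API key and resource IDs, not part of this module;
# shown only to illustrate how the endpoints above fit together):
#
#   sprite_api = SpriteApi(api_key='<API_KEY>')
#   created = sprite_api.create(
#       manifest_id='<manifest-id>',
#       period_id='<period-id>',
#       adaptationset_id='<image-adaptationset-id>',
#       sprite_representation=SpriteRepresentation()
#   )
#   details = sprite_api.get('<manifest-id>', '<period-id>', '<image-adaptationset-id>', created.id)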
|
[
"openapi@bitmovin.com"
] |
openapi@bitmovin.com
|
63183dac12e1ac146a8ee95e08c3d767c7460535
|
76e498240a644b7ccf7e6af69f958f72af595a3c
|
/2018/function.py
|
44fe021fe2be5eac9710967e541d465baee557c2
|
[] |
no_license
|
VladyslavHnatchenko/united
|
54a868b9bdb54b510fb33f6b74562f2fb2c23c01
|
64d3319b18fcc8e1dbb96a63f7bef0c2e5766520
|
refs/heads/master
| 2020-04-13T07:24:28.053997
| 2019-08-23T07:22:10
| 2019-08-23T07:22:10
| 163,050,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
def function_a():
global a
a = 1
b = 2
return a+b
def function_b():
c = 3
return a+c
print(function_a())
print(function_b())
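# Note: function_b() only works because function_a() has already run and bound the
# module-level global `a`; calling function_b() before function_a() would raise a NameError.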
# def many(*args, **kwargs):
# print(args)
# print(kwargs)
#
#
# many(1, 2, 3, name="Mike", job="programmer")
# def keyword_function(a=1, b=2):
# return a+b
#
#
# print(keyword_function(b=4, a=5))
# def add(a, b):
# return a + b
#
#
# print(add(a=2, b=3))
# total = add(b=4, a=5)
# print(total)
# print(add(1, 2))
# add(1)
# def empty_function():
# pass
#
#
# def a_function():
# print("You just created a function!")
#
#
# # a_function()
# empty_function()
|
[
"hnatchenko.vladyslav@gmail.com"
] |
hnatchenko.vladyslav@gmail.com
|
6e9202c9029c4103e41f6eb7df2b3592fa136a5c
|
946469c469a07e70260143805c0b395508aad27f
|
/tf01_helloword/tf_01_helloword.py
|
4812457f6c8033ceb7f726ff5dfe5858b9446803
|
[] |
no_license
|
jpegbert/TensorFlow1.x
|
6f4bf2d658ac3cea298b0247c405f48cefa5db7f
|
bded173429581805324fda4bccd8180eafdd3496
|
refs/heads/master
| 2023-03-21T15:19:39.440658
| 2021-03-19T12:57:02
| 2021-03-19T12:57:02
| 348,709,037
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
import tensorflow as tf
message = tf.constant('Welcome to the exciting world of Deep Neural Networks!')
with tf.Session() as sess:
print(sess.run(message).decode())
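# Under TensorFlow 1.x this prints: Welcome to the exciting world of Deep Neural Networks!
# (sess.run returns the constant's value as bytes, hence the .decode() call.)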
|
[
"jiangpeng.jiang@zhaopin.com.cn"
] |
jiangpeng.jiang@zhaopin.com.cn
|
5930cfc112f15a47ff6b5e6f315d023db88b1b72
|
dd80a584130ef1a0333429ba76c1cee0eb40df73
|
/development/samples/SampleSyncAdapter/samplesyncadapter_server/model/datastore.py
|
1f916332184373abece4825b266b23e6cfb2503a
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
karunmatharu/Android-4.4-Pay-by-Data
|
466f4e169ede13c5835424c78e8c30ce58f885c1
|
fcb778e92d4aad525ef7a995660580f948d40bc9
|
refs/heads/master
| 2021-03-24T13:33:01.721868
| 2017-02-18T17:48:49
| 2017-02-18T17:48:49
| 81,847,777
| 0
| 2
|
MIT
| 2020-03-09T00:02:12
| 2017-02-13T16:47:00
| null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
#!/usr/bin/python2.5
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Represents user's contact information"""
from google.appengine.ext import db
class Contact(db.Model):
"""Data model class to hold user objects."""
handle = db.StringProperty(required=True)
firstname = db.StringProperty()
lastname = db.StringProperty()
phone_home = db.PhoneNumberProperty()
phone_office = db.PhoneNumberProperty()
phone_mobile = db.PhoneNumberProperty()
email = db.EmailProperty()
status = db.TextProperty()
avatar = db.BlobProperty()
deleted = db.BooleanProperty()
updated = db.DateTimeProperty(auto_now_add=True)
@classmethod
def get_contact_info(cls, username):
if username not in (None, ''):
query = cls.gql('WHERE handle = :1', username)
return query.get()
return None
@classmethod
def get_contact_last_updated(cls, username):
if username not in (None, ''):
query = cls.gql('WHERE handle = :1', username)
return query.get().updated
return None
@classmethod
def get_contact_id(cls, username):
if username not in (None, ''):
query = cls.gql('WHERE handle = :1', username)
return query.get().key().id()
return None
@classmethod
def get_contact_status(cls, username):
if username not in (None, ''):
query = cls.gql('WHERE handle = :1', username)
return query.get().status
return None
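# Example (hypothetical handle value): Contact.get_contact_id('jdoe') runs the GQL query
# "WHERE handle = :1" and returns the datastore key id of the matching entity; it returns
# None for an empty handle and raises AttributeError if no entity matches, because
# query.get() yields None in that case.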
|
[
"karun.matharu@gmail.com"
] |
karun.matharu@gmail.com
|
946474b8afc07b9a6402d4e856c9d8b4e23a8aa7
|
aed0850065dd467c0d0650c41987b61e94cad9c6
|
/day 16/merging.py
|
a62f058715350f049773cbc9a756eca3ed872a3c
|
[] |
no_license
|
parmarjh/100day-coding-challenge
|
96c79cc86a8f1e0b062b72dd5992610597e289e8
|
8b3e1f6654e4a55a08b4f938f13626fcc2aa8468
|
refs/heads/master
| 2023-02-18T19:51:22.200057
| 2020-12-31T12:54:10
| 2020-12-31T12:54:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def printList(self):
temp = self.head
while temp:
print(temp.data, end=" ")
temp = temp.next
def addToList(self, newData):
newNode = Node(newData)
if self.head is None:
self.head = newNode
return
last = self.head
while last.next:
last = last.next
last.next = newNode
def mergeLists(headA, headB):
dummyNode = Node(0)
tail = dummyNode
while True:
if headA is None:
tail.next = headB
break
if headB is None:
tail.next = headA
break
if headA.data <= headB.data:
tail.next = headA
headA = headA.next
else:
tail.next = headB
headB = headB.next
tail = tail.next
return dummyNode.next
listA = LinkedList()
listB = LinkedList()
listA.addToList(5)
listA.addToList(10)
listA.addToList(15)
listB.addToList(2)
listB.addToList(3)
listB.addToList(20)
listA.head = mergeLists(listA.head, listB.head)
print("Merged Linked List is:")
listA.printList()
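# Expected output:
# Merged Linked List is:
# 2 3 5 10 15 20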
|
[
"annamalaipalani11@gmail.com"
] |
annamalaipalani11@gmail.com
|
f295ae8bb445794a84f3e45c99863c3f72ad0726
|
9bbd4f00fd88474b3ab1f007cb6848cf6c2304e8
|
/run.py
|
93c10171ae06cd2b21a361602775df251bd21300
|
[] |
no_license
|
cappuccino213/AutoGTF
|
3128deb15bf6ebd67ed811773b3ef972f51fa9b7
|
e9ee23860c5f59011367fb84c646f942fb5890ef
|
refs/heads/master
| 2020-04-16T17:52:30.603769
| 2019-01-17T03:01:46
| 2019-01-17T03:01:46
| 165,792,698
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,936
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/14 14:20
# @Author : Zhangyp
# @File : run.py
# @Software: PyCharm
# @license : Copyright(C), eWord Technology Co., Ltd.
# @Contact : yeahcheung213@163.com
from mssql import *
# from ReadConf import *
import os
import logging
from shellcopy import *
import shutil
import decimal
PARA = conf()  # load the configuration file
def get_filepath():
	"""Get the file names and their paths."""
ms = MSSQL(host=PARA['host'], user=PARA['user'], pwd=PARA['password'], db=PARA['dbname'])
path = ms.ExecQuery(PARA['query_statement'])
try:
lp = []
ln = []
for i in range(len(path)):
n = path[i][0].split('\\')[-1]
ln.append(n)
p = path[i][0].split(n)[0]
p = p.split('\\',1)[1]
lp.append(p)
return ln, lp
except Exception as e:
logging.error(str(e))
def find_file():
"""遍历file目录下所有文件"""
try:
cp = os.getcwd()
f = [i for i in os.listdir(os.path.join(cp, 'file')) if not os.path.isdir(i)]
return f
except OSError as e:
logging.error(str(e))
# return []
def generate_file(src_file, num):
"""根据原文件生成指定数量的文件"""
filepath = src_file.rpartition('\\')[0]+'\\' # 提取路径
filename = src_file.split('\\')[-1].split('.')[0] # 提取文件名
filesuffix = src_file.split('\\')[-1].split('.')[1] # 提取后缀名
for i in range(num):
dst_file = filepath+filename+str(i)+'.'+filesuffix # 新生成的文件
shutil.copyfile(src_file, dst_file)
def main():
"""将指定文件重命名,复制到指定文件夹"""
(filename, paths) = get_filepath() # 获取目标文件的名字列表、相对路径
try:
		abspath = [os.getcwd() + '\\newfile' + paths[i] for i in range(len(paths))]  # absolute paths of the target files
try:
for i in range(len(abspath)):
				os.makedirs(abspath[i])  # create the target directory
				logging.info('Task %s: folder -> %s created successfully' % (str(i), abspath[i]))
except OSError as e:
logging.warning(str(e))
srcname = find_file()
if len(srcname) == len(paths):
for i in range(len(srcname)):
				# rename the file
				oldname = os.path.join(os.getcwd(), 'file', srcname[i]) # old file name
				newname = os.path.join(abspath[i], filename[i]) # new file name
				try:
					os.rename(oldname, newname)
					logging.info('Task %s: renamed file %s' % (str(i), newname))
				except FileExistsError as e:
					logging.warning('%s [Suggestion] clear the newfile directory and retry' % str(e))
if PARA['isshare'] == '1':
openshare(PARA['path'], PARA['shareuser'], PARA['sharepwd'])
shellcopy(os.getcwd() + '\\newfile', PARA['path'])
closeshare()
elif PARA['isshare'] == '0':
mkdir(PARA['path'])
shellcopy(os.getcwd() + '\\newfile', PARA['path'])
else:
pass
else:
			logging.warning('The number of source files does not match the number of target files to generate')
except Exception as e:
logging.info(str(e))
if __name__ == '__main__':
# main()
generate_file(r'E:\1\2ewrfewr.dcm', 2000)
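	# With these arguments generate_file() copies E:\1\2ewrfewr.dcm 2000 times into the
	# same folder, naming the copies 2ewrfewr0.dcm through 2ewrfewr1999.dcm.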
|
[
"yeahcheung213@163.com"
] |
yeahcheung213@163.com
|
4fb445678a4cd023a19dc7dd202db200d82ccfba
|
9331f7179c2490f9bc0141ce91ebea704124e168
|
/clr.py
|
3aaea86cb672e27e196ed3354ac23a1e91344fba
|
[] |
no_license
|
peternara/Contrastive-learning-for-image-retrieval-self-training
|
50fbb35ca4f4f8cef6f70e7f037bb65f1f58bc21
|
f04bdd62a1a647207c599570394c93327fb02044
|
refs/heads/main
| 2023-03-24T13:01:19.448203
| 2021-03-19T07:29:32
| 2021-03-19T07:29:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,435
|
py
|
import os
os.environ['CUDA_LAUNCH_BLOCKING']='1'
import torch
from models.BiTmodel import BiTSimCLR
from models.ViTmodel import VisionTransformerSimCLR
from models.Efficientmodel import EfficientCLR
from models.CGDmodel import CGDmodel
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from loss.supconloss import SupConLoss
from utils.utils import get_device, count_parameters, save_config_file, AverageMeter, set_bn_eval
import pytorch_warmup as warmup
import sys
from tqdm import tqdm
import logging
import numpy as np
torch.manual_seed(0)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
apex_support = False
try:
sys.path.append('./apex')
from apex import amp
apex_support = True
except:
print("Please install apex for mixed precision training from: https://github.com/NVIDIA/apex")
apex_support = False
class SimCLR(object):
def __init__(self, dataset, config):
self.config = config
self.device = get_device()
self.writer = SummaryWriter()
self.dataset = dataset
self.train_config = config["SimCLR"]
self.loss_config = config['subcon-loss']
self.criterion = SupConLoss(self.loss_config['temperature'],
contrast_mode=self.loss_config['mode'],
base_temperature=self.loss_config['base'],
device=self.device).to(self.device)
if(config['model_name'] == 'ViT'):
model = VisionTransformerSimCLR(config).to(self.device)
elif(config['model_name'] == 'Eff'):
model = EfficientCLR(config).to(self.device)
elif(config['model_name'] == 'CGD'):
model = CGDmodel(config).to(self.device)
else:
model = BiTSimCLR(config).to(self.device)
self.model = self._load_pre_trained_weights(model)
num_params = count_parameters(self.model)
logger.info("Total Parameter: \t%2.1fM" % num_params)
def _step(self, xi, xj, labels=None):
images = torch.cat([xi, xj], dim=0)
images = images.to(self.device)
bsz = self.config['batch_size']
features, _ = self.model(images)
f1, f2 = torch.split(features, [bsz, bsz], dim=0)
features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
if self.loss_config["method"] == 'SupCon':
labels = labels.to(self.device)
loss = self.criterion(features, labels)
elif self.loss_config["method"] == 'SimCRL':
loss = self.criterion(features)
return loss
def train(self):
#load data loader
train_loader, valid_loader = self.dataset.get_train_validation_data_loaders()
#define optimier
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, self.model.parameters()), self.train_config['lr'], weight_decay=eval(self.train_config['weight_decay']))
n_steps = self.train_config["epochs"] * len(train_loader)
#learning rate schudler
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=n_steps)
warmup_scheduler = warmup.UntunedLinearWarmup(optimizer)
if apex_support and self.config['fp16_precision']:
self.model, optimizer = amp.initialize(self.model, optimizer,
opt_level='O2',
keep_batchnorm_fp32=True)
model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')
# save config file
save_config_file(model_checkpoints_folder)
logger.info("***** Running training *****")
logger.info(" Total optimization steps = %d", n_steps)
n_iter = 0
valid_n_iter = 0
best_valid_loss = np.inf
losses = AverageMeter()
for epoch_counter in range(self.train_config['epochs']):
self.model.train()
# self.model.apply(set_bn_eval)
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
for [xis, xjs], labels in epoch_iterator:
optimizer.zero_grad()
loss = self._step(xis, xjs, labels)
losses.update(loss.item(), self.config["batch_size"])
if n_iter % self.train_config['log_every_n_steps'] == 0:
self.writer.add_scalar('train_loss', loss, global_step=n_iter)
if apex_support and self.train_config['fp16_precision']:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
n_iter += 1
epoch_iterator.set_description(
"Training (%d / %d Epochs) (loss=%2.5f)" % (epoch_counter, self.train_config['epochs'], losses.val)
)
# warmup for the first 10 epochs
scheduler.step(scheduler.last_epoch+1)
warmup_scheduler.dampen()
# validate the model if requested
if epoch_counter % self.train_config['eval_every_n_epochs'] == 0:
valid_loss = self._validate(valid_loader)
if valid_loss < best_valid_loss:
# save the model weights
best_valid_loss = valid_loss
torch.save(self.model.state_dict(), os.path.join(model_checkpoints_folder, 'model.pth'))
self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
valid_n_iter += 1
self.writer.add_scalar('cosine_lr_decay', scheduler.get_lr()[0], global_step=n_iter)
def _load_pre_trained_weights(self, model):
try:
checkpoints_folder = os.path.join('./runs', self.train_config['fine_tune_from'], 'checkpoints')
state_dict = torch.load(os.path.join(checkpoints_folder, 'model.pth'))
model.load_state_dict(state_dict)
logger.info("Loaded pre-trained model with success.")
except FileNotFoundError:
logger.info("Pre-trained weights not found. Training from scratch.")
return model
def _validate(self, valid_loader):
eval_losses = AverageMeter()
logger.info("***** Running Validation *****")
# validation steps
with torch.no_grad():
self.model.eval()
epoch_iterator = tqdm(valid_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
for [xis, xjs], labels in epoch_iterator:
loss = self._step(xis, xjs, labels)
eval_losses.update(loss.item(), self.config["batch_size"])
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
logger.info("\n")
logger.info("Validation Results")
logger.info("Valid Loss: %2.5f" % eval_losses.avg)
return eval_losses.avg
|
[
"noreply@github.com"
] |
peternara.noreply@github.com
|
af0851ba8867ea8c5138700c332c611e7c3453c5
|
b5fb45288ed2a204692051ab78e72d8aa6e5accd
|
/mmdet/models/anchor_heads/anchor_head.py
|
7b1e48d5270fcdf19061e7c3c1ecbade8dfe403f
|
[
"Apache-2.0"
] |
permissive
|
nithinksath96/MMdetection_TensorRT_FP16
|
d4987f003798f5d6d4fe5bde2f30dd5ee2e8596d
|
c8379b209d4deeff9350baf5bbedfc95fb8941f4
|
refs/heads/master
| 2023-02-13T20:00:21.834541
| 2021-01-06T09:24:20
| 2021-01-06T09:24:20
| 327,260,988
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,964
|
py
|
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox, force_fp32,
multi_apply, multiclass_nms)
from ..builder import build_loss
from ..registry import HEADS
@HEADS.register_module
class AnchorHead(nn.Module):
"""Anchor-based head (RPN, RetinaNet, SSD, etc.).
Args:
num_classes (int): Number of categories including the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels. Used in child classes.
anchor_scales (Iterable): Anchor scales.
anchor_ratios (Iterable): Anchor aspect ratios.
anchor_strides (Iterable): Anchor strides.
anchor_base_sizes (Iterable): Anchor base sizes.
target_means (Iterable): Mean values of regression targets.
target_stds (Iterable): Std values of regression targets.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
anchor_scales=[8, 16, 32],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)):
super(AnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.anchor_scales = anchor_scales
self.anchor_ratios = anchor_ratios
self.anchor_strides = anchor_strides
self.anchor_base_sizes = list(
anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
self.target_means = target_means
self.target_stds = target_stds
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC']
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes - 1
else:
self.cls_out_channels = num_classes
if self.cls_out_channels <= 0:
raise ValueError('num_classes={} is too small'.format(num_classes))
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.fp16_enabled = False
self.anchor_generators = []
for anchor_base in self.anchor_base_sizes:
self.anchor_generators.append(
AnchorGenerator(anchor_base, anchor_scales, anchor_ratios))
self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
self._init_layers()
def _init_layers(self):
self.conv_cls = nn.Conv2d(self.in_channels,
self.num_anchors * self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1)
def init_weights(self):
normal_init(self.conv_cls, std=0.01)
normal_init(self.conv_reg, std=0.01)
def forward_single(self, x):
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
return cls_score, bbox_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
device (torch.device | str): device for returned tensors
Returns:
tuple: anchors of each image, valid flags of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = []
for i in range(num_levels):
anchors = self.anchor_generators[i].grid_anchors(
featmap_sizes[i], self.anchor_strides[i], device=device)
multi_level_anchors.append(anchors)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
anchor_stride = self.anchor_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w = img_meta['pad_shape'][:2]
valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
flags = self.anchor_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w),
device=device)
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
def loss_single(self, cls_score, bbox_pred, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples, cfg):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.anchor_generators)
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = anchor_target(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
self.target_means,
self.target_stds,
cfg,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples,
cfg=cfg)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=False):
"""
Transform network output for a batch into labeled boxes.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
img_metas (list[dict]): size / scale info for each image
cfg (mmcv.Config): test / postprocessing configuration
rescale (bool): if True, return boxes in original image space
Returns:
list[tuple[Tensor, Tensor]]: each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the class index of the
corresponding box.
Example:
>>> import mmcv
>>> self = AnchorHead(num_classes=9, in_channels=1)
>>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
>>> cfg = mmcv.Config(dict(
>>> score_thr=0.00,
>>> nms=dict(type='nms', iou_thr=1.0),
>>> max_per_img=10))
>>> feat = torch.rand(1, 1, 3, 3)
>>> cls_score, bbox_pred = self.forward_single(feat)
>>> # note the input lists are over different levels, not images
>>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
>>> result_list = self.get_bboxes(cls_scores, bbox_preds,
>>> img_metas, cfg)
>>> det_bboxes, det_labels = result_list[0]
>>> assert len(result_list) == 1
>>> assert det_bboxes.shape[1] == 5
>>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
mlvl_anchors = [
self.anchor_generators[i].grid_anchors(
cls_scores[i].size()[-2:],
self.anchor_strides[i],
device=device) for i in range(num_levels)
]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
#print("No of proposals in anchor head", len(proposals))
result_list.append(proposals)
return result_list
def get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
"""
Transform outputs for a single batch item into labeled boxes.
"""
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, anchors in zip(cls_score_list,
bbox_pred_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, 1:].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = delta2bbox(anchors, bbox_pred, self.target_means,
self.target_stds, img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
# Add a dummy background class to the front when using sigmoid
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
|
[
"nsathish@compute-0-32.local"
] |
nsathish@compute-0-32.local
|
c3f066c947f48b2d598a1bc6be518303b1f2221e
|
72ede563023f78da0d23f36df0106aa4cd386600
|
/src/mailme/utils/text.py
|
0e2e912bd533cc7e3669e3cef8cd63fa16722f59
|
[
"BSD-3-Clause"
] |
permissive
|
mailme/mailme.io
|
773e8266e6ec307762d220c0a6381170f6905de3
|
3b9cc8009226bf349e96504328146c61d8afcb02
|
refs/heads/master
| 2020-05-31T18:59:01.947128
| 2014-07-04T14:48:17
| 2014-07-04T14:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
"""
mailme.utils.text
~~~~~~~~~~~~~~~~~
Various text realated tools.
"""
import re
_str_num_re = re.compile(r'(?:[^\d]*(\d+)[^\d]*)+')
def increment_string(s):
"""Increment a number in a string or add a number."""
m = _str_num_re.search(s)
if m:
next = str(int(m.group(1)) + 1)
start, end = m.span(1)
if start or end:
return '{0}-{1}{2}'.format(
s[:max(end - len(next), start)],
next,
s[end:])
return s + '-2'
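# e.g. increment_string('draft') returns 'draft-2' (no digit found, so '-2' is appended);
# when a digit is present, it is incremented and re-inserted after a '-' separator.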
|
[
"cg@webshox.org"
] |
cg@webshox.org
|
15a028c91f53835f4b343ea7425d4e20c639cb9d
|
077c91b9d5cb1a6a724da47067483c622ce64be6
|
/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs_with_max_replays_5/interreplay_106_l_4/interactive_replay_config.py
|
6f2cfeb97d82fa75d61d3f7f457edfa22f7668a6
|
[] |
no_license
|
Spencerx/experiments
|
0edd16398725f6fd9365ddbb1b773942e4878369
|
aaa98b0f67b0d0c0c826b8a1565916bf97ae3179
|
refs/heads/master
| 2020-04-03T10:11:40.671606
| 2014-06-11T23:55:11
| 2014-06-11T23:55:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pyretic.py -m p0 pyretic.examples.firewall_for_sts_no_close', label='c1', address='127.0.0.1', cwd='../pyretic', kill_cmd='ps aux | grep -e pox -e pyretic | grep -v simulator | cut -c 9-15 | xargs kill -9')],
topology_class=MeshTopology,
topology_params="num_switches=3",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=False,
kill_controllers_on_exit=True)
control_flow = InteractiveReplayer(simulation_config, "experiments/fuzz_pyretic_mesh_proactive_firewall_no_close_check_loop_mcs/interreplay_106_l_4/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'None'
|
[
"cs@cs.berkeley.edu"
] |
cs@cs.berkeley.edu
|
970f5a23bdfc872d4583272bcf1f1cde513713fe
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/test/test_1567_maximum_length_of_subarray_with_positive_product.py
|
be383471a80f55305d5a81b2bb7181ca2e4f326f
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230
| 2023-08-31T00:49:31
| 2023-08-31T00:49:31
| 143,770,000
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 881
|
py
|
from unittest import TestCase
from problems.N1567_Maximum_Length_Of_Subarray_With_Positive_Product import Solution
class TestSolution(TestCase):
def test_getMaxLen(self):
self.assertEqual(4, Solution().getMaxLen([1, -2, -3, 4]))
def test_getMaxLen_1(self):
nums = [0,1,-2,-3,-4]
self.assertEqual(3, Solution().getMaxLen(nums))
def test_getMaxLen_2(self):
nums = [-1,-2,-3,0,1]
self.assertEqual(2, Solution().getMaxLen(nums))
def test_getMaxLen_3(self):
nums = [-1,2]
self.assertEqual(1, Solution().getMaxLen(nums))
def test_getMaxLen_4(self):
nums = [1,2,3,5,-6,4,0,10]
self.assertEqual(4, Solution().getMaxLen(nums))
def test_getMaxLen_5(self):
nums = [5,-20,-20,-39,-5,0,0,0,36,-32,0,-7,-10,-7,21,20,-12,-34,26,2]
self.assertEqual(8, Solution().getMaxLen(nums))
|
[
"rarry2012@gmail.com"
] |
rarry2012@gmail.com
|
00f7e48f26ccbff5fc6938d1b5f2dd403b2806bb
|
b346530b455135b224470511061cffcffa6274b5
|
/pypesto/result/predict.py
|
67fb95b50ac197cade10c8b3f91f9a33d34c6a7f
|
[
"BSD-3-Clause"
] |
permissive
|
ICB-DCM/pyPESTO
|
cd3ac5c26be0648c77a409d012a6321ade968d5d
|
9a754573a7b77d30d5dc1f67a8dc1be6c29f1640
|
refs/heads/main
| 2023-09-03T19:34:55.581478
| 2023-06-29T09:22:57
| 2023-06-29T09:22:57
| 142,321,950
| 174
| 48
|
BSD-3-Clause
| 2023-09-14T14:59:50
| 2018-07-25T15:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 12,223
|
py
|
"""PredictionResult and PredictionConditionResult."""
import os
from pathlib import Path
from time import time
from typing import Dict, Sequence, Union
from warnings import warn
import h5py
import numpy as np
import pandas as pd
from ..C import (
CONDITION_IDS,
CSV,
OUTPUT,
OUTPUT_IDS,
OUTPUT_SENSI,
OUTPUT_SIGMAY,
OUTPUT_WEIGHT,
PARAMETER_IDS,
TIME,
TIMEPOINTS,
)
from ..util import get_condition_label
class PredictionConditionResult:
"""
Light-weight wrapper for the prediction of one simulation condition.
It should provide a common api how amici predictions should look like in
pyPESTO.
"""
def __init__(
self,
timepoints: np.ndarray,
output_ids: Sequence[str],
output: np.ndarray = None,
output_sensi: np.ndarray = None,
output_weight: float = None,
output_sigmay: np.ndarray = None,
x_names: Sequence[str] = None,
):
"""
Initialize PredictionConditionResult.
Parameters
----------
timepoints:
Output timepoints for this simulation condition
output_ids:
IDs of outputs for this simulation condition
output:
Postprocessed outputs (ndarray)
output_sensi:
Sensitivities of postprocessed outputs (ndarray)
output_weight:
LLH of the simulation
output_sigmay:
Standard deviations of postprocessed observables
x_names:
IDs of model parameter w.r.t to which sensitivities were computed
"""
self.timepoints = timepoints
self.output_ids = output_ids
self.output = output
self.output_sensi = output_sensi
self.output_weight = output_weight
self.output_sigmay = output_sigmay
self.x_names = x_names
if x_names is None and output_sensi is not None:
self.x_names = [
f'parameter_{i_par}' for i_par in range(output_sensi.shape[1])
]
def __iter__(self):
"""Allow usage like a dict."""
yield 'timepoints', self.timepoints
yield 'output_ids', self.output_ids
yield 'x_names', self.x_names
yield 'output', self.output
yield 'output_sensi', self.output_sensi
yield 'output_weight', self.output_weight
yield 'output_sigmay', self.output_sigmay
def __eq__(self, other):
"""Check equality of two PredictionConditionResults."""
def to_bool(expr):
if isinstance(expr, bool):
return expr
return expr.any()
if to_bool(self.timepoints != other.timepoints):
return False
if to_bool(self.x_names != other.x_names):
return False
if to_bool(self.output_ids != other.output_ids):
return False
if to_bool(self.output != other.output):
return False
if to_bool(self.output_sensi != other.output_sensi):
return False
if to_bool(self.output_weight != other.output_weight):
return False
if to_bool(self.output_sigmay != other.output_sigmay):
return False
return True
class PredictionResult:
"""
Light-weight wrapper around prediction from pyPESTO made by an AMICI model.
Its only purpose is to have fixed format/api, how prediction results
should be stored, read, and handled: as predictions are a very flexible
format anyway, they should at least have a common definition,
which allows to work with them in a reasonable way.
"""
def __init__(
self,
conditions: Sequence[Union[PredictionConditionResult, Dict]],
condition_ids: Sequence[str] = None,
comment: str = None,
):
"""
Initialize PredictionResult.
Parameters
----------
conditions:
A list of PredictionConditionResult objects or dicts
condition_ids:
IDs or names of the simulation conditions, which belong to this
prediction (e.g., PEtab uses tuples of preequilibration condition
and simulation conditions)
comment:
An additional note, which can be attached to this prediction
"""
# cast the result per condition
self.conditions = [
cond
if isinstance(cond, PredictionConditionResult)
else PredictionConditionResult(**cond)
for cond in conditions
]
self.condition_ids = condition_ids
if self.condition_ids is None:
self.condition_ids = [
get_condition_label(i_cond)
for i_cond in range(len(conditions))
]
# add a comment to this prediction if available
self.comment = comment
def __iter__(self):
"""Allow usage like an iterator."""
parameter_ids = None
if self.conditions:
parameter_ids = self.conditions[0].x_names
yield 'conditions', [dict(cond) for cond in self.conditions]
yield 'condition_ids', self.condition_ids
yield 'comment', self.comment
yield 'parameter_ids', parameter_ids
def __eq__(self, other):
"""Check equality of two PredictionResults."""
if not isinstance(other, PredictionResult):
return False
if self.comment != other.comment:
return False
if self.condition_ids != other.condition_ids:
return False
for i_cond, _ in enumerate(self.conditions):
if self.conditions[i_cond] != other.conditions[i_cond]:
return False
return True
def write_to_csv(self, output_file: str):
"""
Save predictions to a csv file.
Parameters
----------
output_file:
path to file/folder to which results will be written
"""
def _prepare_csv_output(output_file):
"""
Prepare a folder for output.
If a csv is requested, this routine will create a folder for it,
with a suiting name: csv's are by default 2-dimensional, but the
output will have the format n_conditions x n_timepoints x n_outputs
For sensitivities, we even have x n_parameters. This makes it
necessary to create multiple files and hence, a folder of its own
makes sense. Returns a pathlib.Path object of the output.
"""
# allow entering with names with and without file type endings
if '.' in output_file:
output_path, output_suffix = output_file.split('.')
else:
output_path = output_file
output_suffix = CSV
# parse path and check whether the file exists
output_path = Path(output_path)
output_path = self._check_existence(output_path)
# create
output_path.mkdir(parents=True, exist_ok=False)
# add the suffix
output_dummy = Path(output_path.stem).with_suffix(
f'.{output_suffix}'
)
return output_path, output_dummy
# process the name of the output file, create a folder
output_path, output_dummy = _prepare_csv_output(output_file)
# loop over conditions (i.e., amici edata objects)
for i_cond, cond in enumerate(self.conditions):
timepoints = pd.Series(name=TIME, data=cond.timepoints)
# handle outputs, if computed
if cond.output is not None:
# create filename for this condition
filename = output_path.joinpath(
output_dummy.stem + f'_{i_cond}' + output_dummy.suffix
)
# create DataFrame and write to file
result = pd.DataFrame(
index=timepoints, columns=cond.output_ids, data=cond.output
)
result.to_csv(filename, sep='\t')
# handle output sensitivities, if computed
if cond.output_sensi is not None:
# loop over parameters
for i_par in range(cond.output_sensi.shape[1]):
# create filename for this condition and parameter
filename = output_path.joinpath(
output_dummy.stem
+ f'_{i_cond}__s{i_par}'
+ output_dummy.suffix
)
# create DataFrame and write to file
result = pd.DataFrame(
index=timepoints,
columns=cond.output_ids,
data=cond.output_sensi[:, i_par, :],
)
result.to_csv(filename, sep='\t')
def write_to_h5(self, output_file: str, base_path: str = None):
"""
Save predictions to an h5 file.
It appends to the file if the file already exists.
Parameters
----------
output_file:
path to file/folder to which results will be written
base_path:
base path in the h5 file
"""
# check if the file exists and append to it in case it does
output_path = Path(output_file)
filemode = 'w'
if os.path.exists(output_path):
filemode = 'r+'
base = Path('.')
if base_path is not None:
base = Path(base_path)
with h5py.File(output_path, filemode) as f:
# loop over conditions (i.e., amici edata objects)
if self.conditions and self.conditions[0].x_names is not None:
f.create_dataset(
os.path.join(base, PARAMETER_IDS),
data=self.conditions[0].x_names,
)
if self.condition_ids is not None:
f.create_dataset(
os.path.join(base, CONDITION_IDS), data=self.condition_ids
)
for i_cond, cond in enumerate(self.conditions):
# each conditions gets a group of its own
f.create_group(os.path.join(base, str(i_cond)))
# save output IDs
f.create_dataset(
os.path.join(base, str(i_cond), OUTPUT_IDS),
data=cond.output_ids,
)
# save timepoints, outputs, and sensitivities of outputs
f.create_dataset(
os.path.join(base, str(i_cond), TIMEPOINTS),
data=cond.timepoints,
)
if cond.output is not None:
f.create_dataset(
os.path.join(base, str(i_cond), OUTPUT),
data=cond.output,
)
if cond.output_sensi is not None:
f.create_dataset(
os.path.join(base, str(i_cond), OUTPUT_SENSI),
data=cond.output_sensi,
)
if cond.output_weight is not None:
f.create_dataset(
os.path.join(base, str(i_cond), OUTPUT_WEIGHT),
data=cond.output_weight,
)
if cond.output_sigmay is not None:
f.create_dataset(
os.path.join(base, str(i_cond), OUTPUT_SIGMAY),
data=cond.output_sigmay,
)
@staticmethod
def _check_existence(output_path):
"""
Check whether a file or a folder already exists.
Append a timestamp if this is the case.
"""
output_path_out = output_path
while output_path_out.exists():
output_path_out = output_path_out.with_name(
output_path_out.stem + f'_{round(time() * 1000)}'
)
warn(
				'Output name already exists! Changed the name of the output '
				'by appending the unix timestamp to make it unique!'
)
return output_path_out
|
[
"noreply@github.com"
] |
ICB-DCM.noreply@github.com
|
3b3ffedb0a26a37b64ae6911117d444709c961dd
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/2742.py
|
11f9b2447b2a937fc8b101e3bde6e62529371e24
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,461
|
py
|
def main(x):
result='Draw'
for i in range(0,4):
if x[i][0]!='.' and x[i][0]!='T':
flag=x[i][0]
else:
continue
count=0
for j in range(0,4):
if x[i][j]==flag or x[i][j]=='T':
count+=1
else:
break
if count==4:
result=flag+' won'
count=0
for i in range(0,4):
if x[0][i] != '.' and x[0][i] != 'T':
flag=x[0][i]
else:
continue
count1=0
for j in range(0,4):
if x[j][i]==flag or x[j][i]=='T':
count1+=1
else:
break
if count1==4:
result=flag +' won'
count1=0
for i in range(0,4):
if x[0][0] != '.' and x[0][0] != 'T':
flag=x[0][0]
else:
continue
if flag==x[i][i] or x[i][i]=='T':
count1+=1
else:
break
if count1==4:
result=flag +' won'
if x[0][3] != '.' and x[0][3] != 'T':
flag=x[0][3]
if (x[1][2]==flag or x[1][2]=='T') and (x[2][1]==flag or x[2][1]=='T') and (x[3][0]==flag or x[3][0]=='T'):
result=flag + ' won'
if result=='Draw':
for i in range(0,4):
for j in range(0,4):
if x[i][j]=='.':
result='Game has not completed'
break
return result
if __name__ == '__main__':
import sys
inp=[[''],[''],[''],['']]
inpf=open('1.txt')
outp=open('output.txt','w')
N = int(inpf.readline())
for i in xrange(N):
for j in xrange(4):
inp[j]=inpf.readline().strip()
res = main(inp)
K=inpf.readline().strip()
outp.write("Case #%d: %s\n" % (i + 1, res))
outp.close()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
059406c3a6505c171156ef98fa09d4509e2bfe85
|
66738cf02020d979410bf65be524ed9bb622e7c5
|
/homework/day0214/homework02.py
|
1fab74d3a284ddda1f80f9403c3284315061adc7
|
[] |
no_license
|
Liu-Zhijuan-0313/pythonAdvance
|
882a2c19cf611f068e4549a8b06bdfd0036d3624
|
e5b794f8d0fa0da5465fe123ac179ac09d62cf24
|
refs/heads/master
| 2020-04-22T09:15:57.963165
| 2019-02-23T08:02:49
| 2019-02-23T08:02:49
| 170,261,598
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
# 2. Requirement: add a decorator function to the order-list and user-balance functions so that both perform permission verification
# 1. Original version
# def checkuser():
#     username = input("Please enter username: ")
#     if username == "lzj":
#         print("Login successful")
#         showlist()
#         showmoney()
#     else:
#         print("Not authorized, login failed")
#
#
# def showlist():
#     print("Order list")
# def showmoney():
#     print("User balance")
# checkuser()
# 2. With a closure
# def checkuser(fun):
#     def check():
#         username = input("Please enter username: ")
#         if username == "lzj":
#             print("Login successful")
#             fun()
#         else:
#             print("Not authorized, login failed")
#     return check
#
# def showlist():
#     print("Order list")
# showlist = checkuser(showlist)
# showlist()
# def showmoney():
#     print("User balance")
# showmoney = checkuser(showmoney)
# showmoney()
# 3. With a decorator
def checkuser(fun):
	def check():
		username = input("Please enter username: ")
		if username == "lzj":
			print("Login successful")
			fun()
		else:
			print("Not authorized, login failed")
	return check
@checkuser
def showlist():
	print("Order list")
showlist()
@checkuser
def showmoney():
	print("User balance")
showmoney()
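# The @checkuser syntax in section 3 is equivalent to the manual rebinding in section 2,
# i.e. showlist = checkuser(showlist), so calling showlist() actually invokes check().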
|
[
"1602176692@qq.com"
] |
1602176692@qq.com
|
fea4f6f8d0ab5ce23d28edba69f6ef51f9911fa3
|
50c14feef077d97bceba2a8aee6af60091d369db
|
/congrads/conmap.py
|
5127c5eee7fc1082562616b6750918d1b5084cfe
|
[
"BSD-3-Clause"
] |
permissive
|
kristianeschenburg/congrads
|
0d2ea92a521c88c752d98f5d9dfff2f8b6fe0957
|
ef5f315302e9801b202e0c08b396c3f6cdbe49d9
|
refs/heads/master
| 2020-04-17T03:24:39.933567
| 2020-03-27T18:58:29
| 2020-03-27T18:58:29
| 166,181,734
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,237
|
py
|
# usage: conmap.py [-h] -i INFILES -r ROIFILE -m MASKFILE -o OUTDIR
# [--nmaps NMAPS] [--save_eta2 SAVE_ETA2] [--norm NORM_FLAG]
#
# Developed at DCCN (Donders Centre for Cognitive Neuroimaging), Donders Institute
# for Brain, Cognition and Behaviour. Radboud University, Nijmegen, The Netherlands
#
# Authors: KV Haak, AF Marquand, CF Beckmann.
#
# If you use this code in your research, please quote the following journal reference:
#
# Haak KV, Marquand AF, Beckmann CF (2018) Connectopic mapping with resting-state fMRI.
# NeuroImage 170:83-94.
import numpy as np
from scipy.spatial.distance import cdist
def pca(X):
from scipy.linalg import svd
# Center X by subtracting off column means
X -= np.mean(X, 0)
# The principal components are the eigenvectors of S = X'*X./(n-1), but computed using SVD
[U, sigma, V] = svd(X, full_matrices=False)
# Project X onto the principal component axes
Y = U*sigma
# Convert the singular values to eigenvalues
sigma /= np.sqrt(X.shape[0]-1)
evals = np.square(sigma)
return V, Y, evals
def corr(X, Y):
Y = Y.T
X = X.T
R = cdist(X, Y, metric='correlation')
R = 1-R
return R
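# eta2() below builds the eta-squared similarity matrix: for every pair of rows of X it
# computes 1 - (within-pair sum of squares / total sum of squares), so identical
# connectivity fingerprints score 1 and unrelated ones score close to 0.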
def eta2(X):
S = np.zeros((X.shape[0], X.shape[0]))
for i in range(0, X.shape[0]):
for j in range(i, X.shape[0]):
mi = np.nanmean([X[i, :], X[j, :]], 0)
mm = np.nanmean(mi)
ssw = np.nansum(np.square(X[i, :]-mi) + np.square(X[j, :]-mi))
sst = np.nansum(np.square(X[i, :]-mm) + np.square(X[j, :]-mm))
S[i, j] = 1-ssw/sst
S += S.T
S -= np.eye(S.shape[0])
return S
def norm(X):
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
return squareform(pdist(X))
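# adjacency() below bisection-searches for the smallest threshold e at which the graph
# with edges between voxels closer than e is still connected, and returns the resulting
# binary adjacency matrix with self-connections removed.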
def adjacency(X):
from networkx import is_connected
from networkx import from_numpy_matrix
emin = 0
emax = np.max(X)
tol = 0.0001
maxiter = 1000
cntr = 0
done = False
while not done:
e = (emin + emax) / 2
A = (X < e) - np.eye(X.shape[0])
G = from_numpy_matrix(A)
if is_connected(G):
emax = e
if (emax - emin) < tol:
done = True
else:
emin = e
cntr += 1
if cntr == maxiter:
done = True
return A
# Main routine
def main(infiles,roifile,maskfile,outdir,nmaps,save_eta2=False,norm_flag=False,proj_flag=False):
import nibabel as nib
import sys
import errno
np.seterr(invalid='ignore')
out_base_name = roifile.split('/')[-1].split('.nii')[0]
# Load the roi
try:
print('Loading roi from: ' + roifile)
roiImg = nib.load(roifile)
roi = roiImg.get_data()
except:
        sys.exit('Cannot open ' + roifile + '\nExiting.')
if len(roi.shape) != 3:
sys.exit(roifile + ' is not a 3D image\nExiting.')
# Store the dimensions of the roi data for later use
roidims = roi.shape
nVoxels = np.prod(roidims)
# Reshape roi into a vector of size nVoxels
roi = np.reshape(roi,(nVoxels))
# Find the indices inside roi
roiIndices = np.where(roi>0)
# Load the mask
try:
print('Loading mask from: ' + maskfile)
maskImg = nib.load(maskfile)
mask = maskImg.get_data()
except:
        sys.exit('Cannot open ' + maskfile + '\nExiting.')
if len(mask.shape) != 3:
sys.exit(maskfile + ' is not a 3D image\nExiting.')
# Reshape the mask into a vector of size nVoxels
mask = np.reshape(mask,(nVoxels))
# Find the indices outside roi but inside mask
maskIndices = np.where((roi==0) & (mask>0))
# Initialise similarity matrix
S = np.zeros([np.sum(roi>0),np.sum(roi>0)])
# Loop over infiles and create average similarity matrix
for infile in infiles:
print('Processing ' + infile)
# Load functional data
try:
dataImg = nib.load(infile)
data = dataImg.get_data()
except:
            sys.exit('Cannot open ' + infile + '\nExiting.')
if len(data.shape) != 4:
sys.exit(infile + ' is not a 4D image\nExiting.')
# Assert absence of nans and infs
if np.any(np.isnan(data)) or np.any(np.isinf(data)):
sys.exit('Data contains invalid values.\nExiting.')
# Reshape and standardise
nFrames = data.shape[3]
data = np.reshape(data,(nVoxels,nFrames))
data -= np.tile(np.mean(data,1),(nFrames,1)).T
data /= np.tile(np.std(data,1),(nFrames,1)).T
# Gather data inside roi
A = data[roiIndices,:][0]
# If the roi contains invalid data it must be due to a division by 0 (stdev)
# since the data themselves do not contain nans or infs. If so, we terminate
# the program and the user should define a roi covering functional data.
if np.any(np.isnan(A)) or np.any(np.isinf(A)):
sys.exit('ROI includes voxels without variance.\nExiting.')
# Gather data outside roi
B = data[maskIndices,:][0]
# Transpose so that the data are stored in a time x space matrix
A = A.T
B = B.T
# A division by 0 (stdev) can also lead to nans and infs in the mask data.
# In this case we can simply throw a warning and ignore all voxels without
# variance.
keepB = ~np.isnan(B).any(axis=0) & ~np.isinf(B).any(axis=0)
if np.any(np.isnan(B)) or np.any(np.isinf(B)):
print('WARNING: Mask includes voxels without variance.')
del data
# Get voxel-wise connectivity fingerprints
print('Computing voxel-wise connectivity fingerprints...')
[evecs,Bhat,evals] = pca(B[:,keepB])
R = corr(A,Bhat)
# Construct similarity matrix of connectivity fingerprints
print('Computing similarity matrix...')
S += eta2(R)
if len(infiles) > 1:
print('Creating average similarity matrix...')
S /= len(infiles)
# If requested, save the similarity matrix as a matlab .mat file
if save_eta2:
import scipy.io
scipy.io.savemat(outdir + "/" + out_base_name + ".eta2", dict(S=S))
# Compute the graph Laplacian
print('Computing the graph Laplacian...')
dist = norm(S)**2
W = np.multiply(adjacency(dist),S)
D = np.diag(np.sum(W,0))
L = np.subtract(D,W)
# Solve generalised eigenvalue problem Ly = lDy
print('Computing the dominant ' + str(nmaps) + ' connectopic maps...')
from scipy.linalg import eigh
l,y = eigh(L,D,eigvals=(0,nmaps))
# The eigenvectors have an intrinsic sign indeterminacy, which is inconvenient
# for spatial statistical modeling. We deal with this by flipping the sign of
# the eigenvectors if they correlate negatively with the reference vector
# defined below.
x0,y0,z0 = np.floor(roidims[0]/2),0,0
X,Y,Z = np.ogrid[0:roidims[0],0:roidims[1],0:roidims[2]]
ref = np.sqrt((X-x0)**2+(Y-y0)**2+(Z-z0)**2)
ref = np.reshape(ref,(np.prod(roidims)))
ref = ref[np.where(roi==1)]
    # Deal with sign ambiguity and, if requested, normalize y to range between 0 and 1
for evec in range(0,y.shape[1]):
y[:,evec] = np.multiply(y[:,evec],np.sign(np.corrcoef(y[:,evec],ref)[0,1]))
if norm_flag:
tmp = y[:,evec] - min(y[:,evec])
y[:,evec] = np.divide(tmp,max(tmp))
# Store the eigenmaps as a 4D nifti image
print('Writing connectopic maps to: ' + outdir)
outfile = outdir + "/" + out_base_name + ".cmaps.nii.gz"
yDat = np.zeros(shape=roidims+(nmaps,))
yDat = np.reshape(yDat,(np.prod(roidims),nmaps))
yDat[roiIndices,:] = y[:,1:nmaps+1]
yDat = np.reshape(yDat,roidims+(nmaps,))
yImg = nib.Nifti1Image(yDat,roiImg.get_affine(),roiImg.get_header())
try:
nib.save(yImg,outfile)
except:
        sys.exit('Cannot save ' + outfile + '\nExiting.')
# Optionally project eigenmaps onto mask by spatial regression
if proj_flag:
print('Computing projections onto mask...')
outfile = outdir + "/" + out_base_name + ".pmaps.nii.gz"
YDat = np.zeros(shape=roidims+(nmaps,))
YDat = np.reshape(YDat,(np.prod(roidims),nmaps))
for evec in range(1,y.shape[1]):
X = np.vstack([np.ones(y.shape[0]),y[:,evec].T])
beta = np.dot(np.linalg.pinv(X.T),A.T)
Y = np.dot(B.T,beta.T)[:,1]
if norm_flag:
Y -= min(Y)
Y /= max(Y)
YDat[maskIndices,evec-1] = Y
print('Writing projection maps to: ' + outdir)
YDat = np.reshape(YDat,roidims+(nmaps,))
YImg = nib.Nifti1Image(YDat,roiImg.get_affine(),roiImg.get_header())
try:
nib.save(YImg,outfile)
except:
            sys.exit('Cannot save ' + outfile + '\nExiting.')
print("Done.")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="ConGrads")
parser.add_argument("-i",dest="infiles",help="One or more 4D images",required=True,nargs='*')
parser.add_argument("-r",dest="roifile",help="Region-of-Interest (binary 3D nifti)",required=True)
parser.add_argument("-m",dest="maskfile",help="Mask (binary 3D nifti)",required=True)
parser.add_argument("-o",dest="outdir",help="Output directory",required=True)
parser.add_argument("--nmaps",dest="nmaps",default=1,type=int,help="Number of connectopic maps")
parser.add_argument("--save_eta2",dest="save_eta2",action="store_true",help="Store eta2 matrix")
parser.add_argument("--norm",dest="norm_flag",action="store_true",help="Normalise maps")
parser.add_argument("--project",dest="proj_flag",action="store_true",help="Project maps onto mask")
args=parser.parse_args()
main(args.infiles,args.roifile,args.maskfile,args.outdir,args.nmaps,args.save_eta2,args.norm_flag,args.proj_flag)
|
[
"keschenb@uw.edu"
] |
keschenb@uw.edu
|
053462ec4a7e180cc789d1c4e57d7317a937c305
|
de1d7a3d8f29f88cc81163daf13e689b6a40f059
|
/email_messages/forms.py
|
405125dc3385913d028d2a32af5fedf65de7a455
|
[] |
no_license
|
kelechi2020/golivecomptask
|
764c8be7cdb760b492bbd203cb1831fd47ba9e0c
|
76c2d3c1ad4830399be0bf41bb63731bc50fe5e6
|
refs/heads/master
| 2022-12-10T14:34:28.809502
| 2017-08-06T12:36:45
| 2017-08-06T12:36:45
| 98,848,170
| 0
| 0
| null | 2022-12-08T00:42:51
| 2017-07-31T04:30:17
|
CSS
|
UTF-8
|
Python
| false
| false
| 985
|
py
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import User
class MessageForm(forms.Form):
recipient = forms.ModelChoiceField(label=_("Recipient"), queryset=User.objects.all(), required=True,)
message = forms.CharField(label=_("Message"), widget=forms.Textarea, required=True,)
def __init__(self, request, *args, **kwargs):
super(MessageForm, self).__init__(*args, **kwargs)
self.request = request
self.fields["recipient"].queryset = self.fields["recipient"].queryset.exclude(pk=request.user.pk)
def save(self):
cleaned_data = self.cleaned_data
        send_mail(subject=ugettext("A message from %s") % self.request.user, message=cleaned_data["message"], from_email=self.request.user.email, recipient_list=[cleaned_data["recipient"].email], fail_silently=True)
|
[
"egbosikelechi@gmail.com"
] |
egbosikelechi@gmail.com
|
0bbb06dc8d7cbf276b4acee582f650109dd8b1fa
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/celery/2015/12/test_builtins.py
|
73601734b9176fe303b39b5ab5d9b913e6298584
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 5,411
|
py
|
from __future__ import absolute_import
from celery import group, chord
from celery.app import builtins
from celery.five import range
from celery.utils.functional import pass1
from celery.tests.case import AppCase, ContextMock, Mock, patch
class BuiltinsCase(AppCase):
def setup(self):
@self.app.task(shared=False)
def xsum(x):
return sum(x)
self.xsum = xsum
@self.app.task(shared=False)
def add(x, y):
return x + y
self.add = add
class test_backend_cleanup(BuiltinsCase):
def test_run(self):
self.app.backend.cleanup = Mock()
self.app.backend.cleanup.__name__ = 'cleanup'
cleanup_task = builtins.add_backend_cleanup_task(self.app)
cleanup_task()
self.assertTrue(self.app.backend.cleanup.called)
class test_accumulate(BuiltinsCase):
def setup(self):
self.accumulate = self.app.tasks['celery.accumulate']
def test_with_index(self):
self.assertEqual(self.accumulate(1, 2, 3, 4, index=0), 1)
def test_no_index(self):
self.assertEqual(self.accumulate(1, 2, 3, 4), (1, 2, 3, 4))
class test_map(BuiltinsCase):
def test_run(self):
@self.app.task(shared=False)
def map_mul(x):
return x[0] * x[1]
res = self.app.tasks['celery.map'](
map_mul, [(2, 2), (4, 4), (8, 8)],
)
self.assertEqual(res, [4, 16, 64])
class test_starmap(BuiltinsCase):
def test_run(self):
@self.app.task(shared=False)
def smap_mul(x, y):
return x * y
res = self.app.tasks['celery.starmap'](
smap_mul, [(2, 2), (4, 4), (8, 8)],
)
self.assertEqual(res, [4, 16, 64])
class test_chunks(BuiltinsCase):
@patch('celery.canvas.chunks.apply_chunks')
def test_run(self, apply_chunks):
@self.app.task(shared=False)
def chunks_mul(l):
return l
self.app.tasks['celery.chunks'](
chunks_mul, [(2, 2), (4, 4), (8, 8)], 1,
)
self.assertTrue(apply_chunks.called)
class test_group(BuiltinsCase):
def setup(self):
self.maybe_signature = self.patch('celery.canvas.maybe_signature')
self.maybe_signature.side_effect = pass1
self.app.producer_or_acquire = Mock()
self.app.producer_or_acquire.attach_mock(ContextMock(), 'return_value')
self.app.conf.task_always_eager = True
self.task = builtins.add_group_task(self.app)
super(test_group, self).setup()
def test_apply_async_eager(self):
self.task.apply = Mock(name='apply')
self.task.apply_async((1, 2, 3, 4, 5))
self.assertTrue(self.task.apply.called)
def mock_group(self, *tasks):
g = group(*tasks, app=self.app)
result = g.freeze()
for task in g.tasks:
task.clone = Mock(name='clone')
task.clone.attach_mock(Mock(), 'apply_async')
return g, result
@patch('celery.app.base.Celery.current_worker_task')
def test_task(self, current_worker_task):
g, result = self.mock_group(self.add.s(2), self.add.s(4))
self.task(g.tasks, result, result.id, (2,)).results
g.tasks[0].clone().apply_async.assert_called_with(
group_id=result.id, producer=self.app.producer_or_acquire(),
add_to_parent=False,
)
current_worker_task.add_trail.assert_called_with(result)
@patch('celery.app.base.Celery.current_worker_task')
def test_task__disable_add_to_parent(self, current_worker_task):
g, result = self.mock_group(self.add.s(2, 2), self.add.s(4, 4))
self.task(g.tasks, result, result.id, None, add_to_parent=False)
self.assertFalse(current_worker_task.add_trail.called)
class test_chain(BuiltinsCase):
def setup(self):
BuiltinsCase.setup(self)
self.task = builtins.add_chain_task(self.app)
def test_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.task()
class test_chord(BuiltinsCase):
def setup(self):
self.task = builtins.add_chord_task(self.app)
super(test_chord, self).setup()
def test_apply_async(self):
x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s())
r = x.apply_async()
self.assertTrue(r)
self.assertTrue(r.parent)
def test_run_header_not_group(self):
self.task([self.add.s(i, i) for i in range(10)], self.xsum.s())
def test_forward_options(self):
body = self.xsum.s()
x = chord([self.add.s(i, i) for i in range(10)], body=body)
x.run = Mock(name='chord.run(x)')
x.apply_async(group_id='some_group_id')
self.assertTrue(x.run.called)
resbody = x.run.call_args[0][1]
self.assertEqual(resbody.options['group_id'], 'some_group_id')
x2 = chord([self.add.s(i, i) for i in range(10)], body=body)
x2.run = Mock(name='chord.run(x2)')
x2.apply_async(chord='some_chord_id')
self.assertTrue(x2.run.called)
resbody = x2.run.call_args[0][1]
self.assertEqual(resbody.options['chord'], 'some_chord_id')
def test_apply_eager(self):
self.app.conf.task_always_eager = True
x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s())
r = x.apply_async()
self.assertEqual(r.get(), 90)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
d0c3bd3d778e8c722d0034f32411f2807179fe54
|
aec28a032dd5788d9201d6325f2efa285116696e
|
/snake_iterator/t2d.py
|
cd291e17c7b793521c037741410e3ab47419c48d
|
[] |
no_license
|
pletzer/pyterp_tests
|
346addfe89ff14613e986ca2b9a14206f9b41d45
|
56be0634d8f7402ce5322a6a67c1843a593d31de
|
refs/heads/master
| 2020-05-29T08:50:40.072549
| 2017-07-20T03:23:34
| 2017-07-20T03:23:34
| 69,289,048
| 1
| 3
| null | 2017-03-22T19:15:02
| 2016-09-26T20:15:44
|
Python
|
UTF-8
|
Python
| false
| false
| 268
|
py
|
n0, n1 = 3, 4
inds = []
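# Snake (boustrophedon) ordering of an n0 x n1 grid: rows with even j are traversed
# left-to-right, rows with odd j right-to-left, so the column index i is reversed on
# odd rows.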
for j in range(n0):
for i in range(n1):
indexFlat = n1*j + i
indexSnake = n1*j + (1 - j%2)*i + (n1 - 1 - i)*(j%2)
inds.append(indexSnake)
print('indexFlat = {} indexSnake = {}'.format(indexFlat, indexSnake))
inds.sort()
print(inds)
|
[
"alexander@gokliya.net"
] |
alexander@gokliya.net
|
826e0cec2f2c532e1a1a11b0b59549a07ebdb131
|
924814aef07d17e10461ed2da54e935ea40c0456
|
/links2markdown/links2markdown.py
|
67b641d2f1aa7c7e6e21061f024b545880499be3
|
[
"WTFPL",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
hydrargyrum/attic
|
edd7f302e273cd7e762c8bd8efd365ac4cd24aa1
|
bf90a01ddaeb505b783ec3853c46aaaa0aa51304
|
refs/heads/master
| 2023-09-01T08:11:13.126290
| 2023-08-27T13:06:55
| 2023-08-27T13:08:09
| 13,541,344
| 18
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,208
|
py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: WTFPL
import argparse
import re
import signal
import sys
from html.parser import HTMLParser
import requests
LINK_RE = re.compile(r"""https?://[^])'">\s]+""")
class TitleFetchParser(HTMLParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.path = []
self.title = None
def handle_starttag(self, tag, attrs):
self.path.insert(0, tag)
    def handle_endtag(self, tag):
        try:
            idx = self.path.index(tag)
        except ValueError:
            return
        if idx != 0:
            # closing tag does not match the innermost open tag (badly nested markup)
            raise AssertionError(f"{self.path[0]!r} != {tag!r}")
        del self.path[:idx + 1]
def handle_data(self, data):
if self.title:
return
if self.path and self.path[0] == "title" and "head" in self.path:
self.title = data
def fetch_title(url):
try:
response = requests.get(
url,
headers={
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/113.0",
},
)
except requests.exceptions.RequestException:
return None
parser = TitleFetchParser(convert_charrefs=True)
try:
parser.feed(response.text)
parser.close()
except AssertionError as exc:
print(f"failed on {url}: {exc}", file=sys.stderr)
return None
else:
return parser.title
def link_to_markdown(m):
url = m[0]
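    # Leave the URL untouched if it is already the target of a markdown link,
    # i.e. the match is immediately preceded by "](".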
if m.start() > 2 and m.string[m.start() - 1] == "(" and m.string[m.start() - 2] == "]":
return url
title = fetch_title(url) or url
title = re.sub(r"\s+", " ", title.strip())
return f"[{title}]({url})"
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
parser = argparse.ArgumentParser()
parser.add_argument("file", default="-", nargs="?")
args = parser.parse_args()
if args.file == "-":
fp = sys.stdin
else:
fp = open(args.file)
with fp:
for line in fp:
line = LINK_RE.sub(link_to_markdown, line)
print(line, end="")
if __name__ == "__main__":
main()
|
[
"dev@indigo.re"
] |
dev@indigo.re
|
5330f052d2dd394451f49779af973f7ceeeaf5ec
|
658773cf775fd97c3cec3aca5f559500dec021bc
|
/modules/s3/s3track.py
|
ad41a55d76ce313b2884a60b4fc1286e1f01ef89
|
[
"MIT"
] |
permissive
|
smeissner/ifrc
|
f3795474219d20fba5c68192f5d9b90006288e3e
|
505eb6ffbb8fc32fdbbe63fdab4c19d87e53ca86
|
refs/heads/master
| 2021-01-18T10:43:55.847965
| 2012-10-07T22:43:15
| 2012-10-07T22:43:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,525
|
py
|
# -*- coding: utf-8 -*-
""" Simple Generic Location Tracking System
@copyright: 2011-12 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from gluon.dal import Table, Query, Set, Expression, Rows, Row
from datetime import datetime, timedelta
__all__ = ["S3Tracker"]
UID = "uuid" # field name for UIDs
TRACK_ID = "track_id" # field name for track ID
LOCATION_ID = "location_id" # field name for base location
LOCATION = "gis_location" # location tablename
PRESENCE = "sit_presence" # presence tablename
# =============================================================================
class S3Trackable(object):
"""
Trackable types instance(s)
"""
def __init__(self, trackable, record_id=None, uid=None, rtable=None):
"""
Constructor:
@param trackable: the trackable object
@param record_id: the record ID(s) (if object is a table or tablename)
@param uid: the record UID(s) (if object is a table or tablename)
@param rtable: the resource table (for the recursive calls)
"""
db = current.db
s3db = current.s3db
self.records = []
self.table = s3db.sit_trackable
self.rtable = rtable
if isinstance(trackable, (Table, str)):
if hasattr(trackable, "_tablename"):
table = trackable
tablename = table._tablename
else:
table = s3db[trackable]
tablename = trackable
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("Not a trackable type: %s" % tablename)
query = (table._id > 0)
if uid is None:
if record_id is not None:
if isinstance(record_id, (list, tuple)):
query = (table._id.belongs(record_id))
else:
query = (table._id == record_id)
elif UID in table.fields:
                if isinstance(uid, (list, tuple)):
                    query = (table[UID].belongs(uid))
                else:
                    query = (table[UID] == uid)
fields = [table[f] for f in fields]
rows = db(query).select(*fields)
elif isinstance(trackable, Row):
fields = self.__get_fields(trackable)
if not fields:
raise SyntaxError("Required fields not present in the row")
rows = Rows(records=[trackable], compact=False)
elif isinstance(trackable, Rows):
rows = [r for r in trackable if self.__get_fields(r)]
fail = len(trackable) - len(rows)
if fail:
raise SyntaxError("Required fields not present in %d of the rows" % fail)
rows = Rows(records=rows, compact=False)
elif isinstance(trackable, (Query, Expression)):
tablename = db._adapter.get_table(trackable)
self.rtable = s3db[tablename]
fields = self.__get_fields(self.rtable)
if not fields:
raise SyntaxError("Not a trackable type: %s" % tablename)
query = trackable
fields = [self.rtable[f] for f in fields]
rows = db(query).select(*fields)
elif isinstance(trackable, Set):
query = trackable.query
tablename = db._adapter.get_table(query)
table = s3db[tablename]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("Not a trackable type: %s" % tablename)
fields = [table[f] for f in fields]
rows = trackable.select(*fields)
else:
raise SyntaxError("Invalid parameter type %s" % type(trackable))
records = []
for r in rows:
if self.__super_entity(r):
table = s3db[r.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
fields = [table[f] for f in fields]
query = table[UID] == r[UID]
row = db(query).select(limitby=(0, 1), *fields).first()
if row:
records.append(row)
else:
records.append(r)
self.records = Rows(records=records, compact=False)
# -------------------------------------------------------------------------
@staticmethod
def __super_entity(trackable):
"""
Check whether a trackable is a super-entity
@param trackable: the trackable object
"""
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
return "instance_type" in keys
# -------------------------------------------------------------------------
def __get_fields(self, trackable, super_entity=True):
"""
Check a trackable for presence of required fields
            @param trackable: the trackable object
"""
fields = []
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
try:
if super_entity and \
self.__super_entity(trackable) and UID in keys:
return ("instance_type", UID)
if LOCATION_ID in keys:
fields.append(LOCATION_ID)
if TRACK_ID in keys:
fields.append(TRACK_ID)
return fields
elif hasattr(trackable, "update_record") or \
isinstance(trackable, Table) or \
isinstance(trackable, Row):
return fields
except:
pass
return None
# -------------------------------------------------------------------------
def get_location(self,
timestmp=None,
_fields=None,
_filter=None,
as_rows=False,
exclude=[]):
"""
Get the current location of the instance(s) (at the given time)
@param timestmp: last datetime for presence (defaults to current time)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@param exclude: interlocks to break at (avoids circular check-ins)
@returns: a location record, or a list of location records (if multiple)
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
ltable = s3db[LOCATION]
if timestmp is None:
timestmp = datetime.utcnow()
locations = []
for r in self.records:
location = None
if TRACK_ID in r:
query = ((ptable.deleted == False) & \
(ptable[TRACK_ID] == r[TRACK_ID]) & \
(ptable.timestmp <= timestmp))
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence:
if presence.interlock:
exclude = [r[TRACK_ID]] + exclude
tablename, record = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename, record)
record = trackable.records.first()
if TRACK_ID not in record or \
record[TRACK_ID] not in exclude:
location = trackable.get_location(timestmp=timestmp,
exclude=exclude,
_fields=_fields).first()
elif presence.location_id:
query = (ltable.id == presence.location_id)
if _filter is not None:
query = query & _filter
if _fields is None:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if not location:
if len(self.records) > 1:
trackable = S3Trackable(r, rtable=self.rtable)
else:
trackable = self
location = trackable.get_base_location(_fields=_fields)
if location:
locations.append(location)
else:
# Ensure we return an entry so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
else:
return locations
# -------------------------------------------------------------------------
def set_location(self, location, timestmp=None):
"""
Set the current location of instance(s) (at the given time)
@param location: the location (as Row or record ID)
@param timestmp: the datetime of the presence (defaults to current time)
@returns: nothing
"""
ptable = current.s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
if "location_id" in location:
location = location.location_id
else:
location = location.id
if not location:
return
else:
data = dict(location_id=location, timestmp=timestmp)
for r in self.records:
if TRACK_ID not in r:
# No track ID => set base location
if len(self.records) > 1:
trackable = S3Trackable(r)
else:
trackable = self
trackable.set_base_location(location)
elif r[TRACK_ID]:
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
# -------------------------------------------------------------------------
def check_in(self, table, record, timestmp=None):
"""
Bind the presence of the instance(s) to another instance
@param table: table name of the other resource
@param record: record in the other resource (as Row or record ID)
@param timestmp: datetime of the check-in
@returns: nothing
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if isinstance(table, str):
table = s3db[table]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("No location data in %s" % table._tablename)
interlock = None
if isinstance(record, Rows):
record = record.first()
if not isinstance(record, Row):
record = table[record]
if self.__super_entity(record):
table = s3db[record.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
query = table[UID] == record[UID]
record = db(query).select(limitby=(0, 1)).first()
if record and table._id.name in record:
record = record[table._id.name]
if record:
interlock = "%s,%s" % (table, record)
else:
raise SyntaxError("No record specified for %s" % table._tablename)
if interlock:
if timestmp is None:
timestmp = datetime.utcnow()
data = dict(location_id=None,
timestmp=timestmp,
interlock=interlock)
q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
for r in self.records:
if TRACK_ID not in r:
# Cannot check-in a non-trackable
continue
query = q & (ptable[TRACK_ID] == r[TRACK_ID])
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence and presence.interlock == interlock:
# already checked-in to the same instance
continue
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
# -------------------------------------------------------------------------
def check_out(self, table=None, record=None, timestmp=None):
"""
Make the last log entry before timestmp independent from
the referenced entity (if any)
@param timestmp: the date/time of the check-out, defaults
to current time
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
interlock = None
if table is not None:
if isinstance(table, str):
table = s3db[table]
if isinstance(record, Rows):
record = record.first()
if self.__super_entity(table):
if not isinstance(record, Row):
record = table[record]
table = s3db[record.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
query = table[UID] == record[UID]
record = db(query).select(limitby=(0, 1)).first()
if isinstance(record, Row) and table._id.name in record:
record = record[table._id.name]
if record:
interlock = "%s,%s" % (table, record)
else:
return
q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
for r in self.records:
if TRACK_ID not in r:
# Cannot check-out a non-trackable
continue
query = q & (ptable[TRACK_ID] == r[TRACK_ID])
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence and presence.interlock:
if interlock and presence.interlock != interlock:
continue
elif not interlock and table and \
not presence.interlock.startswith("%s" % table):
continue
tablename, record = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename, record)
location = trackable.get_location(timestmp=timestmp).first()
if timestmp - presence.timestmp < timedelta(seconds=1):
timestmp = timestmp + timedelta(seconds=1)
data = dict(location_id=location,
timestmp=timestmp,
interlock=None)
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
# -------------------------------------------------------------------------
def remove_location(self, location=None):
"""
Remove a location from the presence log of the instance(s)
@todo: implement
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_base_location(self,
_fields=None,
_filter=None,
as_rows=False):
"""
Get the base location of the instance(s)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@returns: the base location(s) of the current instance
"""
db = current.db
s3db = current.s3db
ltable = s3db[LOCATION]
rtable = self.rtable
locations = []
for r in self.records:
location = None
query = None
if LOCATION_ID in r:
query = (ltable.id == r[LOCATION_ID])
if rtable:
query = query & (rtable[LOCATION_ID] == ltable.id)
if TRACK_ID in r:
query = query & (rtable[TRACK_ID] == r[TRACK_ID])
elif TRACK_ID in r:
q = (self.table[TRACK_ID] == r[TRACK_ID])
trackable = db(q).select(limitby=(0, 1)).first()
table = s3db[trackable.instance_type]
if LOCATION_ID in table.fields:
query = ((table[TRACK_ID] == r[TRACK_ID]) &
(table[LOCATION_ID] == ltable.id))
if query:
if _filter is not None:
query = query & _filter
if not _fields:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if location:
locations.append(location)
else:
# Ensure we return an entry so that indexes match
locations.append(Row({"id": None, "lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
elif len(locations) == 1:
return locations[0]
else:
return locations
# -------------------------------------------------------------------------
def set_base_location(self, location=None):
"""
Set the base location of the instance(s)
@param location: the location for the base location as Row or record ID
@returns: nothing
@note: instance tables without a location_id field will be ignored
"""
db = current.db
s3db = current.s3db
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
location = location.id
if not location or not str(location).isdigit():
# Location not found
return
else:
data = {LOCATION_ID:location}
# Update records without track ID
for r in self.records:
if TRACK_ID in r:
continue
elif LOCATION_ID in r:
if hasattr(r, "update_record"):
r.update_record(**data)
else:
raise SyntaxError("Cannot relate record to a table.")
# Update records with track ID
# => this can happen table-wise = less queries
track_ids = [r[TRACK_ID] for r in self.records
if TRACK_ID in r]
rows = db(self.table[TRACK_ID].belongs(track_ids)).select()
tables = []
for r in rows:
instance_type = r.instance_type
table = s3db[instance_type]
if instance_type not in tables and \
LOCATION_ID in table.fields:
tables.append(table)
else:
# No location ID in this type => ignore gracefully
continue
# Location specified => update all base locations
for table in tables:
db(table[TRACK_ID].belongs(track_ids)).update(**data)
# Refresh records
for r in self.records:
if LOCATION_ID in r:
r[LOCATION_ID] = location
# -------------------------------------------------------------------------
def __update_timestamp(self, track_id, timestamp):
"""
Update the timestamp of a trackable
@param track_id: the trackable ID (super-entity key)
@param timestamp: the timestamp
"""
if timestamp is None:
timestamp = datetime.utcnow()
if track_id:
trackable = self.table[track_id]
if trackable:
trackable.update_record(track_timestmp=timestamp)
# =============================================================================
class S3Tracker(object):
"""
S3 Tracking system, to be instantiated once as global "s3tracker" object
"""
def __init__(self):
"""
Constructor
"""
# -------------------------------------------------------------------------
def __call__(self, trackable, record_id=None, uid=None):
"""
Get a tracking interface for a record or set of records
@param trackable: a Row, Rows, Query, Expression, Set object or
a Table or a tablename
@param record_id: a record ID or a list/tuple of record IDs
(together with Table or tablename)
@param uid: a record UID or a list/tuple of record UIDs
(together with Table or tablename)
@returns: a S3Trackable instance for the specified record(s)
"""
return S3Trackable(trackable,
record_id=record_id,
uid=uid)
# -------------------------------------------------------------------------
def get_all(self, entity,
location=None,
bbox=None,
timestmp=None):
"""
Get all instances of the given entity at the given location and time
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_checked_in(self, table, record,
instance_type=None,
timestmp=None):
"""
Get all trackables of the given type that are checked-in
to the given instance at the given time
"""
raise NotImplementedError
# =============================================================================
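# Illustrative usage sketch (the table name, record ID and location variable below
# are hypothetical, not part of this module):
#
#   s3tracker = S3Tracker()
#   person = s3tracker("pr_person", record_id=5)   # -> S3Trackable
#   person.set_location(location_id)               # log a new presence record
#   current = person.get_location()                # latest known location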
|
[
"fran@aidiq.com"
] |
fran@aidiq.com
|
b2e6507f7cbc0024a8361d9508c6c4dc5de947ec
|
3a3533b16b54d42d6889c490224345ca985bef74
|
/account_loewie/stock_loewie.py
|
30917e56bf0b12bb40ff4b547bbe9fbb910738b7
|
[
"Apache-2.0"
] |
permissive
|
lester-lees/extra_addons_hk
|
52916ac6858d4b4484bd137b55268c7d5de177d0
|
edd2c2595146bc9c99b75a2d0831a93f940fa55c
|
refs/heads/master
| 2021-01-06T20:43:33.448307
| 2019-03-28T06:46:17
| 2019-03-28T06:46:17
| 99,546,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,228
|
py
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
class stock_move(osv.osv):
_inherit = "stock.move"
_order = 'id , date_expected desc'
def _get_sale_order_line(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
result[move.id] = move.procurement_id.sale_line_id.id
return result
_columns = {
'sale_order_line': fields.function(_get_sale_order_line, type='many2one', relation='sale.order.line',string='Sales Line'),
}
class stock_picking(osv.osv):
_inherit = 'stock.picking'
_order = "id desc, priority desc, date asc"
_columns = {
        'ref_invoice':fields.many2one('account.invoice',string=u'Related Invoice'),
}
def show_account_delivery(self, cr, uid, ids, context=None):
act_obj = self.pool.get('ir.actions.act_window')
result = act_obj.read(cr, uid, [483], context=context)[0]
if ids == 0:
result['domain'] = "[('state','=','done'), ('ref_invoice','=',False),('picking_type_id','in',[2])]"
elif ids == 1:
result['domain'] = "[('state','=','done'), ('ref_invoice','!=',False),('picking_type_id','in',[2])]"
return result
def _invoice_create_line(self, cr, uid, moves, journal_id, inv_type='out_invoice', context=None):
invoice_obj = self.pool.get('account.invoice')
move_obj = self.pool.get('stock.move')
invoices = {}
_logger.info("Jimmy --- _invoice_create_line in sotck_loewie")
for move in moves:
company = move.company_id
origin = move.picking_id.name
partner, user_id, currency_id = move_obj._get_master_data(cr, uid, move, company, context=context)
key = (partner, currency_id, company.id, user_id)
invoice_vals = self._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
if key not in invoices:
# Get account and payment terms
invoice_id = self._create_invoice_from_picking(cr, uid, move.picking_id, invoice_vals, context=context)
invoices[key] = invoice_id
invoice = invoice_obj.browse(cr, uid, [invoice_id], context=context)[0]
invoice.write({'picking_id': move.picking_id.id})
move.picking_id.ref_invoice = invoice_id
_logger.info("Jimmy picking_id:%d" % move.picking_id.id)
if move.picking_id.sale_id :
invoice.write({'sale_id': move.picking_id.sale_id.id})
_logger.info("Jimmy sale_id:%d" % move.picking_id.sale_id.id)
else:
invoice = invoice_obj.browse(cr, uid, invoices[key], context=context)
if not invoice.origin or invoice_vals['origin'] not in invoice.origin.split(', '):
invoice_origin = filter(None, [invoice.origin, invoice_vals['origin']])
invoice.write({'origin': ', '.join(invoice_origin)})
invoice.write({'picking_id': move.picking_id.id})
_logger.info("Jimmy nokey picking_id:%d" % move.picking_id.id)
                move.picking_id.ref_invoice = invoices[key]
if move.picking_id.sale_id :
_logger.info("Jimmy nokey sale_id:%d" % move.picking_id.sale_id.id)
invoice.write({'sale_id': move.picking_id.sale_id.id})
invoice_line_vals = move_obj._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
invoice_line_vals['invoice_id'] = invoices[key]
invoice_line_vals['origin'] = origin
move_obj._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
move_obj.write(cr, uid, move.id, {'invoice_state': 'invoiced'}, context=context)
invoice_obj.button_compute(cr, uid, invoices.values(), context=context, set_total=(inv_type in ('in_invoice', 'in_refund')))
return invoices.values()
|
[
"346994202@qq.com"
] |
346994202@qq.com
|
b1947d513d3280e30c2ad6204ed27e488a5c4920
|
c09817490b36beaea98abc8c955904528c5cd4fd
|
/tests/test_0013-rntuple-anchor.py
|
1c3cb5a8ffd9fbacef60a0be301fe4f5ae217ce2
|
[
"BSD-3-Clause"
] |
permissive
|
oshadura/uproot4
|
245b7e14a3341d87a9e655792c6ee912ad443586
|
ee535f6632d371d82b5173a43d6445c854968315
|
refs/heads/master
| 2023-08-19T13:48:23.541016
| 2021-09-22T23:51:52
| 2021-09-22T23:51:52
| 287,539,468
| 0
| 0
|
BSD-3-Clause
| 2020-08-14T13:29:03
| 2020-08-14T13:29:02
| null |
UTF-8
|
Python
| false
| false
| 3,610
|
py
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import json
import sys
try:
import queue
except ImportError:
import Queue as queue
import numpy
import pytest
import skhep_testdata
import uproot
def test():
filename = skhep_testdata.data_path("uproot-ntpl001_staff.root")
with uproot.open(filename) as f:
obj = f["Staff"]
assert obj.member("fVersion") == 0
assert obj.member("fSize") == 48
assert obj.member("fSeekHeader") == 854
assert obj.member("fNBytesHeader") == 537
assert obj.member("fLenHeader") == 2495
assert obj.member("fSeekFooter") == 72369
assert obj.member("fNBytesFooter") == 285
assert obj.member("fLenFooter") == 804
assert obj.member("fReserved") == 0
header_start = obj.member("fSeekHeader")
header_stop = header_start + obj.member("fNBytesHeader")
header_chunk = f.file.source.chunk(header_start, header_stop)
print("HEADER")
cursor = uproot.Cursor(header_start)
cursor.debug(header_chunk, 80)
print("\n")
notifications = queue.Queue()
footer_start = obj.member("fSeekFooter")
footer_stop = footer_start + obj.member("fNBytesFooter")
header_chunk, footer_chunk = f.file.source.chunks(
[(header_start, header_stop), (footer_start, footer_stop)],
notifications,
)
print("FOOTER")
cursor = uproot.Cursor(footer_start)
cursor.debug(footer_chunk, 80)
print("\n")
# HEADER
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 76 52 1 16 2 0 191 9 0 198 14 105 8 80 63 75 128 117 0 0
# L 4 --- --- --- --- --- --- --- --- --- i --- P ? K --- u --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 0 0 187 9 0 1 0 144 5 0 0 0 83 116 97 102 102 13 0 255
# --- --- --- --- --- --- --- --- --- --- --- --- S t a f f --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 6 16 0 0 0 117 110 100 101 102 105 110 101 100 32 97 117 116 104 111
# --- --- --- --- --- u n d e f i n e d a u t h o
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 114 0 1 0 4 47 24 0 1 0 3 31 12 12 0 0 4 8 0 110
# r --- --- --- --- / --- --- --- --- --- --- --- --- --- --- --- --- --- n
# FOOTER
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 76 52 1 20 1 0 36 3 0 86 138 213 67 60 183 39 139 27 0 1
# L 4 --- --- --- --- $ --- --- V --- --- C < --- ' --- --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 0 23 1 12 0 23 12 12 0 42 72 0 1 0 47 24 0 1 0 7
# --- --- --- --- --- --- --- --- --- * H --- --- --- / --- --- --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 34 26 13 8 0 34 145 5 8 0 34 213 9 86 0 27 13 84 0 0
# " --- --- --- --- " --- --- --- --- " --- --- V --- --- --- T --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 1 0 102 52 26 0 0 148 1 124 0 0 16 0 34 102 15 17 0 34
# --- --- f 4 --- --- --- --- --- | --- --- --- --- " f --- --- --- "
|
[
"noreply@github.com"
] |
oshadura.noreply@github.com
|
86a7eccb3ab0a094a3ddf924c656f167236d265e
|
a0886d451948f51f74f44c39deda8b223b2c51aa
|
/src/classifiers/stream_classifier.py
|
668816b5b2398017f6d5f3db46d696f2260363c2
|
[] |
no_license
|
kykamath/twitter_classifier
|
651b26e8c6f015afa8aeb6f4ba8061da01d36d4d
|
50b00978b1478af427b19aff81c956fbd1ae83fa
|
refs/heads/master
| 2020-04-29T10:43:43.814202
| 2011-04-22T00:22:04
| 2011-04-22T00:22:04
| 1,498,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,183
|
py
|
'''
Created on Apr 17, 2011
@author: kykamath
'''
import sys, math
sys.path.append('../')
from datasets import DataDirection, DocumentType, TweetType
from utilities import Utilities
from settings import Settings
from datetime import datetime, timedelta
from collections import defaultdict
from operator import itemgetter
from classifiers import classToIntMap, MultiClassAUC
class FeatureScore:
def __init__(self):
self.score = 0
self.lastUpdateTime = None
def update(self, decayRate, newTime, increaseValue):
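        # Exponentially decayed counter: the previous score is multiplied by
        # decayRate raised to the hours elapsed since the last update, then
        # increaseValue is added.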
if self.lastUpdateTime==None: self.score=increaseValue
else: self.score= self.score*math.pow(decayRate, ((newTime-self.lastUpdateTime).seconds)/3600)+increaseValue
self.lastUpdateTime=newTime
class StreamClassifier(object):
typeDefault='default'
typeFeatureScoreDecay='feature_score_decay'
typeFeatureScoreDecayWithInverseClassFrequency = 'feature_score_decay_with_inverse_class_frequency'
typeNaiveBayesWithLaplaceSmoothing='naive_bayes_with_laplace_smoothing'
notClassified = 'not_classified'
numberOfClasses = 4
def __init__(self, type, numberOfInitialBufferDays=1, classifyingMethod=None, **kwargs):
self.featureMap = {}
self.type = type
self.numberOfInitialBufferDays = numberOfInitialBufferDays
self.classifyingMethod = classifyingMethod
if self.classifyingMethod==None: self.classifyingMethod = self.classify
self.kwargs = kwargs
self.numberOfTestTweets, self.classifiedDocuments = 0, []
def start(self):
i=1
firstDay = Settings.startTime+timedelta(days=self.numberOfInitialBufferDays)
for tweet in Utilities.getTweets(fileNameMethod=Utilities.getStreamingSetsFile, dataDirection=DataDirection.future, completeTweets=True, **self.kwargs):
tweetTimeStamp = datetime.strptime(tweet['created_at'], Settings.twitter_api_time_format)
if tweet['tweet_type'] == TweetType.train: self.learnFromTweet(tweet)
else:
if firstDay<tweetTimeStamp: self.classifyingMethod(tweet, self.classifyTweet(tweet))
def classify(self, tweet, perClassScores):
sortedScores = sorted(perClassScores.iteritems(), key=itemgetter(1), reverse=True)
if sortedScores: return sortedScores[0][0]
return StreamClassifier.notClassified
def classifyForAUCM(self, tweet, perClassScores):
tempDict = {}
if perClassScores:
total = sum(v for v in perClassScores.itervalues())
for k in perClassScores: perClassScores[k]=perClassScores[k]/total
sortedScores = sorted(perClassScores.iteritems(), key=itemgetter(1), reverse=True)
# if sortedScores[0][1]>=Utilities.my_log(Settings.stream_classifier_class_probability_threshold):
if sortedScores[0][1]>=Settings.stream_classifier_class_probability_threshold:
for classLabel, classId in classToIntMap.iteritems():
if classLabel not in perClassScores: tempDict[classId]=None
else: tempDict[classId]=perClassScores[classLabel]
self.classifiedDocuments.append((self.numberOfTestTweets, classToIntMap[tweet['class']], tempDict))
self.numberOfTestTweets+=1
def getFeatureProbabilites(self, feature, tweetTime):
mapToReturn = {}
totalScore = 0
for featureScore in feature['class'].itervalues():
featureScore.update(self.decayRate, tweetTime, 0)
totalScore+=featureScore.score
for classLabel, featureScore in feature['class'].iteritems(): mapToReturn[classLabel] = float(featureScore.score)/totalScore
return mapToReturn
def getAUCM(self): return MultiClassAUC(self.classifiedDocuments).getMRevised()
@staticmethod
def extractFeatures(document):
for feature in document:
if feature not in Utilities.stopwords: yield feature
class StreamClassifierFeatureScoreDecay(StreamClassifier):
def __init__(self, decayRate, type=StreamClassifier.typeFeatureScoreDecay, **kwargs):
super(StreamClassifierFeatureScoreDecay, self).__init__(type=type, **kwargs)
self.decayRate=decayRate
def learnFromTweet(self, tweet):
classLabel = tweet['class']
tweetTime = datetime.strptime(tweet['created_at'], Settings.twitter_api_time_format)
for feature in StreamClassifier.extractFeatures(tweet['document']):
if feature not in self.featureMap: self.featureMap[feature] = {'stats': {}, 'class': defaultdict(FeatureScore)}
self.featureMap[feature]['class'][classLabel].update(self.decayRate, tweetTime, 1)
def classifyTweet(self, tweet):
tweetFeatureMap = {}
tweetTime = datetime.strptime(tweet['created_at'], Settings.twitter_api_time_format)
for feature in StreamClassifier.extractFeatures(tweet['document']):
if feature in self.featureMap: tweetFeatureMap[feature]=self.getFeatureProbabilites(self.featureMap[feature], tweetTime)
return self.getPerClassScore(tweetFeatureMap)
def getPerClassScore(self, tweetFeatureMap):
perClassScores = {}
for k, v in tweetFeatureMap.iteritems():
# for classLabel, score in v.iteritems(): perClassScores[classLabel]+=math.log(score)
for classLabel, score in v.iteritems():
if score!=0:
if classLabel not in perClassScores: perClassScores[classLabel]=1
perClassScores[classLabel]*=score
return perClassScores
class StreamClassifierFeatureScoreDecayWithInverseClassFrequency(StreamClassifierFeatureScoreDecay):
def __init__(self, decayRate, type=StreamClassifier.typeFeatureScoreDecayWithInverseClassFrequency, **kwargs):
super(StreamClassifierFeatureScoreDecayWithInverseClassFrequency, self).__init__(decayRate, type=StreamClassifier.typeFeatureScoreDecayWithInverseClassFrequency, **kwargs)
def getPerClassScore(self, tweetFeatureMap):
perClassScores = {}
for k, v in tweetFeatureMap.iteritems():
featureScore = float(StreamClassifier.numberOfClasses)/len(v)
if featureScore!=0:
# for classLabel, score in v.iteritems(): perClassScores[classLabel]+=math.log(featureScore*score)
for classLabel, score in v.iteritems():
if classLabel not in perClassScores: perClassScores[classLabel]=1
if score!=0: perClassScores[classLabel]*=(featureScore*score)
return perClassScores
class StreamClassifierNaiveBayesWithLaplaceSmoothing(StreamClassifier):
def __init__(self, decayRate, **kwargs):
super(StreamClassifierNaiveBayesWithLaplaceSmoothing, self).__init__(type=StreamClassifier.typeNaiveBayesWithLaplaceSmoothing, **kwargs)
self.decayRate=decayRate
self.classStats = defaultdict(FeatureScore)
def learnFromTweet(self, tweet):
classLabel = tweet['class']
tweetTime = datetime.strptime(tweet['created_at'], Settings.twitter_api_time_format)
for feature in StreamClassifier.extractFeatures(tweet['document']):
if feature not in self.featureMap: self.featureMap[feature] = {'stats': {}, 'class': defaultdict(FeatureScore)}
self.featureMap[feature]['class'][classLabel].update(self.decayRate, tweetTime, 1)
self.classStats[classLabel].update(self.decayRate, tweetTime, 1)
def classifyTweet(self, tweet):
tweetTime = datetime.strptime(tweet['created_at'], Settings.twitter_api_time_format)
classProbabilities, totalNumberOffUniqueFeatures = {}, len(self.featureMap)
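        # Laplace smoothing: each per-class feature likelihood is
        # (featureCountForClass + 1) / (numberOfFeaturesInClass + totalNumberOffUniqueFeatures).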
for classLabel, classFeatureScore in self.classStats.iteritems():
classFeatureScore.update(self.decayRate, tweetTime, 0)
numberOfFeaturesInClass = classFeatureScore.score
for feature in StreamClassifier.extractFeatures(tweet['document']):
featureCountForClass=0
if feature in self.featureMap and classLabel in self.featureMap[feature]['class']:
self.featureMap[feature]['class'][classLabel].update(self.decayRate, tweetTime, 0)
featureCountForClass = self.featureMap[feature]['class'][classLabel].score
# classProbabilities[classLabel]+=math.log((featureCountForClass+1)/(numberOfFeaturesInClass+totalNumberOffUniqueFeatures))
if classLabel not in classProbabilities: classProbabilities[classLabel]=1
classProbabilities[classLabel]*=(featureCountForClass+1)/(numberOfFeaturesInClass+totalNumberOffUniqueFeatures)
return classProbabilities
if __name__ == '__main__':
streamClassifier = StreamClassifierFeatureScoreDecay(decayRate=Settings.stream_classifier_decay_rate, currentTime=Settings.startTime, dataType=DocumentType.typeRuuslUnigram, numberOfExperts=Settings.numberOfExperts, noOfDays=10)
streamClassifier.classifyingMethod = streamClassifier.classifyForAUCM
streamClassifier.start()
print streamClassifier.type, len(streamClassifier.classifiedDocuments), streamClassifier.getAUCM()
|
[
"krishna.kamath@gmail.com"
] |
krishna.kamath@gmail.com
|
49127c9d5ec63cf0edbb76c8518ef71b738cb115
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03282/s807999610.py
|
ae207e71f6f810f99b64ea485d43701412aa2740
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
import sys
input = sys.stdin.readline
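# Print the first character among the first K characters of S that differs from "1";
# if all of them are "1", print 1.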
def main():
S = input().rstrip()
K = int(input())
ans = 1
for k in range(K):
if S[k] != "1":
ans = int(S[k])
break
print(ans)
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
4c0d24e4899cef4d0b65d73aa2aadd5eccbd352e
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/pyobjc/pyobjc/pyobjc-framework-LaunchServices-2.5.1/setup.py
|
69e9ac501f5f908146c2de98a08b37554caf8233
|
[
"MIT"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
'''
Wrappers for the "LaunchServices" framework on MacOSX. The API's in this
framework enable applications to open other applications or their document
files, similarly to how the Dock or Finder do that.
A number of tasks that can be implemented using this framework:
* Launch or activate applications
* Open documents in other applications
* Identify the preferred application for opening a document
* Register information about the kinds of documents an application
can open (UTI's)
* Obtain information for showing a document (display name, icon, ...)
* Maintain and update the contents of the Recent Items menu.
These wrappers don't include documentation; please check Apple's documentation
for information on how to use this framework and PyObjC's documentation
for general tips and tricks regarding the translation between Python
and (Objective-)C frameworks
NOTE: This wrapper is not complete, this will change in a future version.
'''
from pyobjc_setup import setup
setup(
name='pyobjc-framework-LaunchServices',
version="2.5.1",
description = "Wrappers for the framework LaunchServices on Mac OS X",
packages = [ "LaunchServices" ],
setup_requires = [
'pyobjc-core>=2.5.1',
],
install_requires = [
'pyobjc-core>=2.5.1',
'pyobjc-framework-Cocoa>=2.5.1',
],
)
|
[
"opensource@apple.com"
] |
opensource@apple.com
|
689c4a2a4fbcae5c4efd6838dbe43bd9740fbf9f
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_1/vrmnic005/question3.py
|
1c61ad08760818ce92e029b1eb28c9a08c1f20c6
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339
| 2014-09-22T02:22:22
| 2014-09-22T02:22:22
| 22,372,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
name = input("Enter first name: \n")
surname = input("Enter last name: \n")
money = eval(input("Enter sum of money in USD: \n"))
country = input("Enter country name: \n")
money30 = money*(30/100)
print ()
print ("Dearest ", name,
"\nIt is with a heavy heart that I inform you of the death of my father,\n"
"General Fayk ",surname, ", your long lost relative from Mapsfostol.\n"
"My father left the sum of ", money, "USD for us, your distant cousins.\n"
"Unfortunately, we cannot access the money as it is in a bank in ", country, ".\n"
"I desperately need your assistance to access this money.\n"
"I will even pay you generously, 30% of the amount - ", money30, "USD,\n"
"for your help. Please get in touch with me at this email address asap.\n"
"Yours sincerely\n"
"Frank ", surname,sep ='')
|
[
"jarr2000@gmail.com"
] |
jarr2000@gmail.com
|
45119b2bfb441e6442a560c850e77270bbf09204
|
3b96724c917b3cbbf39a03b42a0c3570cd3714c0
|
/lsstetc.py
|
8ea7790fcc4440ab82fa13614169f62c758a1a7b
|
[] |
no_license
|
wadawson/LSST_ETC
|
44b24737a0515b7d76d74f068c21a66e1ac7f3e6
|
b893ad6c8c162b2b784b67e5e2a6de05cfb1c75b
|
refs/heads/master
| 2020-07-26T04:42:54.654982
| 2015-08-24T23:05:09
| 2015-08-24T23:05:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,640
|
py
|
"""An exposure time calculator for LSST. Uses GalSim to draw a galaxy with specified magnitude,
shape, etc, and then uses the same image as the optimal weight function. Derived from D. Kirkby's
notes on deblending.
"""
import numpy as np
import galsim
# Some constants
# --------------
#
# LSST effective area in meters^2
A = 319/9.6 # etendue / FoV. I *think* this includes vignetting
# zeropoints from DK notes in photons per second per pixel
# should eventually compute these on the fly from filter throughput functions.
s0 = {'u': A*0.732,
'g': A*2.124,
'r': A*1.681,
'i': A*1.249,
'z': A*0.862,
'Y': A*0.452}
# Sky brightnesses in AB mag / arcsec^2.
# stole these from http://www.lsst.org/files/docs/gee_137.28.pdf
# should eventually construct a sky SED (varies with the moon phase) and integrate to get these
B = {'u': 22.8,
'g': 22.2,
'r': 21.3,
'i': 20.3,
'z': 19.1,
'Y': 18.1}
# number of visits
# From LSST Science Book
fiducial_nvisits = {'u': 56,
'g': 80,
'r': 180,
'i': 180,
'z': 164,
'Y': 164}
# exposure time per visit
visit_time = 30.0
# Sky brightness per arcsec^2 per second
sbar = {}
for k in B:
sbar[k] = s0[k] * 10**(-0.4*(B[k]-24.0))
# And some random numbers for drawing
bd = galsim.BaseDeviate(1)
class ETC(object):
def __init__(self, band, pixel_scale=None, stamp_size=None, threshold=0.0,
nvisits=None):
self.pixel_scale = pixel_scale
self.stamp_size = stamp_size
self.threshold = threshold
self.band = band
if nvisits is None:
self.exptime = fiducial_nvisits[band] * visit_time
else:
self.exptime = nvisits * visit_time
self.sky = sbar[band] * self.exptime * self.pixel_scale**2
self.sigma_sky = np.sqrt(self.sky)
self.s0 = s0[band]
def draw(self, profile, mag, noise=False):
img = galsim.ImageD(self.stamp_size, self.stamp_size, scale=self.pixel_scale)
flux = self.s0 * 10**(-0.4*(mag - 24.0)) * self.exptime
profile = profile.withFlux(flux)
profile.drawImage(image=img)
if noise:
gd = galsim.GaussianNoise(bd, sigma=self.sigma_sky)
img.addNoise(gd)
return img
def SNR(self, profile, mag):
img = self.draw(profile, mag, noise=False)
mask = img.array > (self.threshold * self.sigma_sky)
imgsqr = img.array**2*mask
signal = imgsqr.sum()
noise = np.sqrt((imgsqr * self.sky).sum())
return signal / noise
def err(self, profile, mag):
snr = self.SNR(profile, mag)
return 2.5 / np.log(10) / snr
def display(self, profile, mag, noise=True):
img = self.draw(profile, mag, noise)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.imshow(img.array, cmap=cm.Greens)
plt.colorbar()
plt.show()
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
# Filter
parser.add_argument("--band", default='i',
help="band for simulation (Default 'i')")
# PSF structural arguments
PSF_profile = parser.add_mutually_exclusive_group()
PSF_profile.add_argument("--kolmogorov", action="store_true",
help="Use Kolmogorov PSF (Default Gaussian)")
PSF_profile.add_argument("--moffat", action="store_true",
help="Use Moffat PSF (Default Gaussian)")
parser.add_argument("--PSF_beta", type=float, default=3.0,
help="Set beta parameter of Moffat profile PSF. (Default 2.5)")
parser.add_argument("--PSF_FWHM", type=float, default=0.67,
help="Set FWHM of PSF in arcsec (Default 0.67).")
parser.add_argument("--PSF_phi", type=float, default=0.0,
help="Set position angle of PSF in degrees (Default 0.0).")
parser.add_argument("--PSF_ellip", type=float, default=0.0,
help="Set ellipticity of PSF (Default 0.0)")
# Galaxy structural arguments
parser.add_argument("-n", "--sersic_n", type=float, default=1.0,
help="Sersic index (Default 1.0)")
parser.add_argument("--gal_ellip", type=float, default=0.3,
help="Set ellipticity of galaxy (Default 0.3)")
parser.add_argument("--gal_phi", type=float, default=0.0,
help="Set position angle of galaxy in radians (Default 0.0)")
parser.add_argument("--gal_HLR", type=float, default=0.2,
help="Set galaxy half-light-radius. (default 0.5 arcsec)")
# Simulation input arguments
parser.add_argument("--pixel_scale", type=float, default=0.2,
help="Set pixel scale in arcseconds (Default 0.2)")
parser.add_argument("--stamp_size", type=int, default=31,
help="Set postage stamp size in pixels (Default 31)")
# Magnitude!
parser.add_argument("--mag", type=float, default=25.3,
help="magnitude of galaxy")
# threshold
parser.add_argument("--threshold", type=float, default=0.0,
help="Threshold, in sigma-sky units, above which to include pixels")
# Observation characteristics
parser.add_argument("--nvisits", type=int, default=None)
# draw the image!
parser.add_argument("--display", action='store_true',
help="Display image used to compute SNR.")
args = parser.parse_args()
if args.kolmogorov:
psf = galsim.Kolmogorov(fwhm=args.PSF_FWHM)
elif args.moffat:
psf = galsim.Moffat(fwhm=args.PSF_FWHM, beta=args.PSF_beta)
else:
psf = galsim.Gaussian(fwhm=args.PSF_FWHM)
psf = psf.shear(e=args.PSF_ellip, beta=args.PSF_phi*galsim.radians)
gal = galsim.Sersic(n=args.sersic_n, half_light_radius=args.gal_HLR)
gal = gal.shear(e=args.gal_ellip, beta=args.gal_phi*galsim.radians)
profile = galsim.Convolve(psf, gal)
etc = ETC(args.band, pixel_scale=args.pixel_scale, stamp_size=args.stamp_size,
threshold=args.threshold, nvisits=args.nvisits)
    print()
    print("input")
    print("------")
    print("band: {}".format(args.band))
    print("magnitude: {}".format(args.mag))
    print()
    print("output")
    print("------")
    print("SNR: {}".format(etc.SNR(profile, args.mag)))
    print("mag err: {}".format(etc.err(profile, args.mag)))
if args.display:
etc.display(profile, args.mag)
|
[
"jmeyers314@gmail.com"
] |
jmeyers314@gmail.com
|
fda72b8c00fbe56089e69e878bc92dd0b2d869cf
|
8a9c26468d352f52e2773ee3d7f97fae25a9f4f2
|
/example/geo_example.py
|
81095e70b182653736e08b698dc9065e7b9ab480
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Watemlifts/pyecharts
|
76b301a0013cf628e22581b10bfba94e600ba788
|
42c9af85877c812449ad8d3aa942135e95468714
|
refs/heads/master
| 2022-01-04T04:18:40.615309
| 2019-07-02T18:49:02
| 2019-07-02T18:49:02
| 194,917,624
| 1
| 0
|
MIT
| 2021-12-30T19:54:49
| 2019-07-02T18:44:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,238
|
py
|
from example.commons import Collector, Faker
from pyecharts import options as opts
from pyecharts.charts import Geo, Page
from pyecharts.globals import ChartType, SymbolType
C = Collector()
@C.funcs
def geo_base() -> Geo:
c = (
Geo()
.add_schema(maptype="china")
.add("geo", [list(z) for z in zip(Faker.provinces, Faker.values())])
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(),
title_opts=opts.TitleOpts(title="Geo-基本示例"),
)
)
return c
@C.funcs
def geo_visualmap_piecewise() -> Geo:
c = (
Geo()
.add_schema(maptype="china")
.add("geo", [list(z) for z in zip(Faker.provinces, Faker.values())])
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(is_piecewise=True),
title_opts=opts.TitleOpts(title="Geo-VisualMap(分段型)"),
)
)
return c
@C.funcs
def geo_effectscatter() -> Geo:
c = (
Geo()
.add_schema(maptype="china")
.add(
"geo",
[list(z) for z in zip(Faker.provinces, Faker.values())],
type_=ChartType.EFFECT_SCATTER,
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(title_opts=opts.TitleOpts(title="Geo-EffectScatter"))
)
return c
@C.funcs
def geo_heatmap() -> Geo:
c = (
Geo()
.add_schema(maptype="china")
.add(
"geo",
[list(z) for z in zip(Faker.provinces, Faker.values())],
type_=ChartType.HEATMAP,
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(),
title_opts=opts.TitleOpts(title="Geo-HeatMap"),
)
)
return c
@C.funcs
def geo_guangdong() -> Geo:
c = (
Geo()
.add_schema(maptype="广东")
.add(
"geo",
[list(z) for z in zip(Faker.guangdong_city, Faker.values())],
type_=ChartType.HEATMAP,
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(),
title_opts=opts.TitleOpts(title="Geo-广东地图"),
)
)
return c
@C.funcs
def geo_lines() -> Geo:
c = (
Geo()
.add_schema(maptype="china")
.add(
"",
[("广州", 55), ("北京", 66), ("杭州", 77), ("重庆", 88)],
type_=ChartType.EFFECT_SCATTER,
color="white",
)
.add(
"geo",
[("广州", "上海"), ("广州", "北京"), ("广州", "杭州"), ("广州", "重庆")],
type_=ChartType.LINES,
effect_opts=opts.EffectOpts(
symbol=SymbolType.ARROW, symbol_size=6, color="blue"
),
linestyle_opts=opts.LineStyleOpts(curve=0.2),
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(title_opts=opts.TitleOpts(title="Geo-Lines"))
)
return c
@C.funcs
def geo_lines_background() -> Geo:
c = (
Geo()
.add_schema(
maptype="china",
itemstyle_opts=opts.ItemStyleOpts(color="#323c48", border_color="#111"),
)
.add(
"",
[("广州", 55), ("北京", 66), ("杭州", 77), ("重庆", 88)],
type_=ChartType.EFFECT_SCATTER,
color="white",
)
.add(
"geo",
[("广州", "上海"), ("广州", "北京"), ("广州", "杭州"), ("广州", "重庆")],
type_=ChartType.LINES,
effect_opts=opts.EffectOpts(
symbol=SymbolType.ARROW, symbol_size=6, color="blue"
),
linestyle_opts=opts.LineStyleOpts(curve=0.2),
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(title_opts=opts.TitleOpts(title="Geo-Lines-background"))
)
return c
Page().add(*[fn() for fn, _ in C.charts]).render()
|
[
"chenjiandongx@qq.com"
] |
chenjiandongx@qq.com
|
2ba1a3b3c29fa6e3f7d79aeb1519606c031c4b7d
|
a9a6b09c53e77c996f552bf48b4625d280044905
|
/utils/annotation.py
|
f5b8223213824777c1f9d9d9bf62656f9cc1d7a8
|
[] |
no_license
|
yanqinghao/AiLab-detectron2
|
8f8f98ae0f3183102c9b9421a4f314c549d5a2d1
|
05d6016ae3f8c397d08eba485b97fd2a25848f3c
|
refs/heads/master
| 2022-11-23T23:21:47.686819
| 2020-04-09T02:25:24
| 2020-04-09T02:25:24
| 229,718,014
| 0
| 0
| null | 2022-11-22T04:19:49
| 2019-12-23T09:17:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,431
|
py
|
import os
import itertools
import numpy as np
from suanpan.utils import image, json
from detectron2.structures import BoxMode
def get_balloon_dicts(img_dir, json_file):
"""
Parsing via json
"""
imgs_anns = json.load(json_file)
dataset_dicts = []
imagefile = [i.split(".jpg")[0] + ".jpg" for i in imgs_anns["metadata"].keys()]
for idx, v in enumerate(set(imagefile)):
record = {}
indices = [i for i, x in enumerate(imagefile) if x == v]
filename = os.path.join(img_dir, v)
height, width = image.read(filename).shape[:2]
record["file_name"] = filename
record["image_id"] = idx
record["height"] = height
record["width"] = width
objs = []
for index in indices:
data = list(imgs_anns["metadata"].values())[index]
xy = data["xy"][1:]
px = xy[::2]
py = xy[1::2]
poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
poly = list(itertools.chain.from_iterable(poly))
obj = {
"bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": [poly],
"category_id": 0,
"iscrowd": 0,
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
return dataset_dicts
|
[
"woshiyanqinghao@gmail.com"
] |
woshiyanqinghao@gmail.com
|
0cf95f6e811eca6da922c27416beebd819534e5c
|
596e92d0d484b6e7eee6d322e72e52748fdeaa5d
|
/sportsdata/nhl_projections/models/nhl_projections_penalty.py
|
8466f7094f77192feb77a587001c870ba09a7517
|
[] |
no_license
|
scottypate/sportsdata
|
f5f61ddc7eb482883f93737c6ce73dd814ed4336
|
a07955ab50bf4fff1ce114ed9895095ff770c473
|
refs/heads/main
| 2023-08-18T16:51:56.452678
| 2021-10-22T12:44:08
| 2021-10-22T12:44:08
| 420,062,350
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,708
|
py
|
# coding: utf-8
"""
NHL v3 Projections
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class NhlProjectionsPenalty(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'penalty_id': 'int',
'period_id': 'int',
'sequence': 'int',
'time_remaining_minutes': 'int',
'time_remaining_seconds': 'int',
'description': 'str',
'penalty_minutes': 'int',
'penalized_team_id': 'int',
'penalized_player_id': 'int',
'drawn_by_team_id': 'int',
'drawn_by_player_id': 'int',
'is_bench_penalty': 'bool',
'bench_penalty_served_by_player_id': 'int'
}
attribute_map = {
'penalty_id': 'PenaltyID',
'period_id': 'PeriodID',
'sequence': 'Sequence',
'time_remaining_minutes': 'TimeRemainingMinutes',
'time_remaining_seconds': 'TimeRemainingSeconds',
'description': 'Description',
'penalty_minutes': 'PenaltyMinutes',
'penalized_team_id': 'PenalizedTeamID',
'penalized_player_id': 'PenalizedPlayerID',
'drawn_by_team_id': 'DrawnByTeamID',
'drawn_by_player_id': 'DrawnByPlayerID',
'is_bench_penalty': 'IsBenchPenalty',
'bench_penalty_served_by_player_id': 'BenchPenaltyServedByPlayerID'
}
def __init__(self, penalty_id=None, period_id=None, sequence=None, time_remaining_minutes=None, time_remaining_seconds=None, description=None, penalty_minutes=None, penalized_team_id=None, penalized_player_id=None, drawn_by_team_id=None, drawn_by_player_id=None, is_bench_penalty=None, bench_penalty_served_by_player_id=None): # noqa: E501
"""NhlProjectionsPenalty - a model defined in Swagger""" # noqa: E501
self._penalty_id = None
self._period_id = None
self._sequence = None
self._time_remaining_minutes = None
self._time_remaining_seconds = None
self._description = None
self._penalty_minutes = None
self._penalized_team_id = None
self._penalized_player_id = None
self._drawn_by_team_id = None
self._drawn_by_player_id = None
self._is_bench_penalty = None
self._bench_penalty_served_by_player_id = None
self.discriminator = None
if penalty_id is not None:
self.penalty_id = penalty_id
if period_id is not None:
self.period_id = period_id
if sequence is not None:
self.sequence = sequence
if time_remaining_minutes is not None:
self.time_remaining_minutes = time_remaining_minutes
if time_remaining_seconds is not None:
self.time_remaining_seconds = time_remaining_seconds
if description is not None:
self.description = description
if penalty_minutes is not None:
self.penalty_minutes = penalty_minutes
if penalized_team_id is not None:
self.penalized_team_id = penalized_team_id
if penalized_player_id is not None:
self.penalized_player_id = penalized_player_id
if drawn_by_team_id is not None:
self.drawn_by_team_id = drawn_by_team_id
if drawn_by_player_id is not None:
self.drawn_by_player_id = drawn_by_player_id
if is_bench_penalty is not None:
self.is_bench_penalty = is_bench_penalty
if bench_penalty_served_by_player_id is not None:
self.bench_penalty_served_by_player_id = bench_penalty_served_by_player_id
@property
def penalty_id(self):
"""Gets the penalty_id of this NhlProjectionsPenalty. # noqa: E501
:return: The penalty_id of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._penalty_id
@penalty_id.setter
def penalty_id(self, penalty_id):
"""Sets the penalty_id of this NhlProjectionsPenalty.
:param penalty_id: The penalty_id of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._penalty_id = penalty_id
@property
def period_id(self):
"""Gets the period_id of this NhlProjectionsPenalty. # noqa: E501
:return: The period_id of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._period_id
@period_id.setter
def period_id(self, period_id):
"""Sets the period_id of this NhlProjectionsPenalty.
:param period_id: The period_id of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._period_id = period_id
@property
def sequence(self):
"""Gets the sequence of this NhlProjectionsPenalty. # noqa: E501
:return: The sequence of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._sequence
@sequence.setter
def sequence(self, sequence):
"""Sets the sequence of this NhlProjectionsPenalty.
:param sequence: The sequence of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._sequence = sequence
@property
def time_remaining_minutes(self):
"""Gets the time_remaining_minutes of this NhlProjectionsPenalty. # noqa: E501
:return: The time_remaining_minutes of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._time_remaining_minutes
@time_remaining_minutes.setter
def time_remaining_minutes(self, time_remaining_minutes):
"""Sets the time_remaining_minutes of this NhlProjectionsPenalty.
:param time_remaining_minutes: The time_remaining_minutes of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._time_remaining_minutes = time_remaining_minutes
@property
def time_remaining_seconds(self):
"""Gets the time_remaining_seconds of this NhlProjectionsPenalty. # noqa: E501
:return: The time_remaining_seconds of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._time_remaining_seconds
@time_remaining_seconds.setter
def time_remaining_seconds(self, time_remaining_seconds):
"""Sets the time_remaining_seconds of this NhlProjectionsPenalty.
:param time_remaining_seconds: The time_remaining_seconds of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._time_remaining_seconds = time_remaining_seconds
@property
def description(self):
"""Gets the description of this NhlProjectionsPenalty. # noqa: E501
:return: The description of this NhlProjectionsPenalty. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this NhlProjectionsPenalty.
:param description: The description of this NhlProjectionsPenalty. # noqa: E501
:type: str
"""
self._description = description
@property
def penalty_minutes(self):
"""Gets the penalty_minutes of this NhlProjectionsPenalty. # noqa: E501
:return: The penalty_minutes of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._penalty_minutes
@penalty_minutes.setter
def penalty_minutes(self, penalty_minutes):
"""Sets the penalty_minutes of this NhlProjectionsPenalty.
:param penalty_minutes: The penalty_minutes of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._penalty_minutes = penalty_minutes
@property
def penalized_team_id(self):
"""Gets the penalized_team_id of this NhlProjectionsPenalty. # noqa: E501
:return: The penalized_team_id of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._penalized_team_id
@penalized_team_id.setter
def penalized_team_id(self, penalized_team_id):
"""Sets the penalized_team_id of this NhlProjectionsPenalty.
:param penalized_team_id: The penalized_team_id of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._penalized_team_id = penalized_team_id
@property
def penalized_player_id(self):
"""Gets the penalized_player_id of this NhlProjectionsPenalty. # noqa: E501
:return: The penalized_player_id of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._penalized_player_id
@penalized_player_id.setter
def penalized_player_id(self, penalized_player_id):
"""Sets the penalized_player_id of this NhlProjectionsPenalty.
:param penalized_player_id: The penalized_player_id of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._penalized_player_id = penalized_player_id
@property
def drawn_by_team_id(self):
"""Gets the drawn_by_team_id of this NhlProjectionsPenalty. # noqa: E501
:return: The drawn_by_team_id of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._drawn_by_team_id
@drawn_by_team_id.setter
def drawn_by_team_id(self, drawn_by_team_id):
"""Sets the drawn_by_team_id of this NhlProjectionsPenalty.
:param drawn_by_team_id: The drawn_by_team_id of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._drawn_by_team_id = drawn_by_team_id
@property
def drawn_by_player_id(self):
"""Gets the drawn_by_player_id of this NhlProjectionsPenalty. # noqa: E501
:return: The drawn_by_player_id of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._drawn_by_player_id
@drawn_by_player_id.setter
def drawn_by_player_id(self, drawn_by_player_id):
"""Sets the drawn_by_player_id of this NhlProjectionsPenalty.
:param drawn_by_player_id: The drawn_by_player_id of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._drawn_by_player_id = drawn_by_player_id
@property
def is_bench_penalty(self):
"""Gets the is_bench_penalty of this NhlProjectionsPenalty. # noqa: E501
:return: The is_bench_penalty of this NhlProjectionsPenalty. # noqa: E501
:rtype: bool
"""
return self._is_bench_penalty
@is_bench_penalty.setter
def is_bench_penalty(self, is_bench_penalty):
"""Sets the is_bench_penalty of this NhlProjectionsPenalty.
:param is_bench_penalty: The is_bench_penalty of this NhlProjectionsPenalty. # noqa: E501
:type: bool
"""
self._is_bench_penalty = is_bench_penalty
@property
def bench_penalty_served_by_player_id(self):
"""Gets the bench_penalty_served_by_player_id of this NhlProjectionsPenalty. # noqa: E501
:return: The bench_penalty_served_by_player_id of this NhlProjectionsPenalty. # noqa: E501
:rtype: int
"""
return self._bench_penalty_served_by_player_id
@bench_penalty_served_by_player_id.setter
def bench_penalty_served_by_player_id(self, bench_penalty_served_by_player_id):
"""Sets the bench_penalty_served_by_player_id of this NhlProjectionsPenalty.
:param bench_penalty_served_by_player_id: The bench_penalty_served_by_player_id of this NhlProjectionsPenalty. # noqa: E501
:type: int
"""
self._bench_penalty_served_by_player_id = bench_penalty_served_by_player_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NhlProjectionsPenalty, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NhlProjectionsPenalty):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"scotty.pate@auth0.com"
] |
scotty.pate@auth0.com
|
e2fb1c3d5bc361a3e325f532bbc451d36cff636a
|
da29f1f5b4459fbfec968bb694bedb9586f87b14
|
/new_algs/Sequence+algorithms/Selection+algorithm/cmd_test.py
|
3ddfb17e55983232b9aa2709af1bedd5b4f12fc2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
coolsnake/JupyterNotebook
|
547806a45a663f090f313dc3e70f779ad9b213c0
|
20d8df6172906337f81583dabb841d66b8f31857
|
refs/heads/master
| 2023-01-13T18:55:38.615312
| 2020-11-17T22:55:12
| 2020-11-17T22:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,549
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 12 10:11:34 2018
@author: Casey
file to test command line stuff
"""
import sys
#sys.path.append(C:\\Users\\Casey\\Anaconda3\\Lib\\site-packages;C:\\Users\\Casey\\Anaconda3\\Library\\bin;C:\\Program Files (x86)\\Intel\\iCLS Client\\;C:\\Program Files\\Intel\\iCLS Client\\;C:\\Windows\\system32;C:\\Windows;C:\\Windows\\System32\\Wbem;C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\;C:\\Program Files (x86)\\Intel\\Intel(R) Management Engine Components\\DAL;C:\\Program Files\\Intel\\Intel(R) Management Engine Components\\DAL;C:\\Program Files (x86)\\Intel\\Intel(R) Management Engine Components\\IPT;C:\\Program Files\\Intel\\Intel(R) Management Engine Components\\IPT;C:\\Program Files (x86)\\NVIDIA Corporation\\PhysX\\Common;C:\\Program Files\\Intel\\WiFi\\bin\\;C:\\Program Files\\Common Files\\Intel\\WirelessCommon\\;C:\\Program Files\\PuTTY\\;C:\\WINDOWS\\system32;C:\\WINDOWS;C:\\WINDOWS\\System32\\Wbem;C:\\WINDOWS\\System32\\WindowsPowerShell\\v1.0\\;C:\\WINDOWS\\System32\\OpenSSH\\;E:\\Casey\\Matlab\\runtime\\win64;E:\\Casey\\Matlab\\bin;E:\\Casey\\Documents\\Git\\cmd;C:\\Program Files\\OpenVPN\\bin;C:\\Program Files\\nodejs\\;C:\\Users\\Casey\\Anaconda3;C:\\Users\\Casey\\Anaconda3\\Scripts;C:\\Users\\Casey\\Anaconda3\\Library\\bin;C:\\Users\\Casey\\AppData\\Local\\Microsoft\\WindowsApps;C:\\Program Files (x86)\\SSH Communications Security\\SSH Secure Shell;C:\\Users\\Casey\\AppData\\Local\\GitHubDesktop\\bin;C:\\Users\\Casey\\AppData\\Roaming\\npm;)
import ast
import numpy as np
from test import test
#import pandas as pd
#from sklearn import linear_model,svm
#from sklearn.model_selection import cross_val_score
#from itertools import chain
import manually_choose_features
import single_feature_classifier
import recursive_feature_elimination
def main():
input = sys.argv
#input[0] is file name
#later, specify model (i.e. regression) and data set.
#if model is regression, specify feature index to compare
#if model is classification, specify classes somehow, either manually, through a file, create dynamically, etc.
#give option for cross val and num of folds
#for relevant options, specify number of features to stop at
#add some normalization options and specify a default
#add a decision tree
#ideally should have input validation
#adding exceptions for args below would be nice
#create an option to try all feature selection methods with given ml algs for best feature set overall
#consider having separate data processing and feature selection components
#it would be interesting to put the time to run on a file. Perhaps have that as an arg
#it may be interesting to allow for variables entered from the command line to be used as x an y as well
if(input[1] == '-manual'):#the person wants to manually enter feature indeces. Probably not commonly recommended
mlAlg = input[2]
XFile = './datasets/' + input[3]
XFeatures = ast.literal_eval(input[4])
yFile = './datasets/' + input[5]
yFeature = ast.literal_eval(input[6])
manually_choose_features.enterFeatureIndeces(XFeatures,yFeature,XFile,yFile,mlAlg)
elif(input[1] == '-sfc'):#single feature classifier
mlAlg = input[2]
checkMLAlg(mlAlg)
XFile = './datasets/' + input[3]
yFile = './datasets/' + input[4]
finalFeatureSetSize = input[5]#check that this is an int
print('single feature classifier\n')
#f = open("file.txt","a")
#f.write('aardvark'+ '\n')
#f.close
#print(XFile)
#exit()
single_feature_classifier.specifyDataset(XFile,yFile,mlAlg,finalFeatureSetSize)
elif(input[1] == '-rfe'):
mlAlg = input[2]
checkMLAlg(mlAlg)
XFile = './datasets/' + input[3]
yFile = './datasets/' + input[4]
finalFeatureSetSize = input[5]#check that this is an int
print('recursive feature elimination\n')
recursive_feature_elimination.specifyDataset(XFile,yFile,mlAlg,finalFeatureSetSize)
def checkMLAlg(_mlAlg):
if _mlAlg == 'lin_reg':
return
elif _mlAlg == 'svm':
return
else:
printMLAlgOptions()
exit()
def printMLAlgOptions():
print("Machine learning algorithms to choose from and their argument names:\n")
print('linear regression: \"lin_reg\"')
print('support vector machine: \"svm\"')
if __name__=="__main__":
main()
exit()
|
[
"chenqh@uci.edu"
] |
chenqh@uci.edu
|
b0b2d646e0da85e1252012bdb744b3cc410bbd8f
|
687a3cc0e531d77e91fbdb27ace757197bc287e3
|
/test/dist_utils.py
|
476e7cecc2456da911283791382cf69d9a887ea7
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
aporter1350/pytorch
|
b4871331e0a225c0cd028bd176e9ebb5ee730921
|
bb1d9b238dbda5da1ba7b65953558fd2deea1f00
|
refs/heads/master
| 2020-09-13T15:18:00.562350
| 2019-11-20T01:21:34
| 2019-11-20T01:24:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,929
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import sys  # needed for sys.exit() below when c10d is unavailable
import threading
from functools import partial, wraps
from os import getenv
import torch.distributed as dist
import torch.distributed.rpc as rpc
if not dist.is_available():
print("c10d not available, skipping tests")
sys.exit(0)
class TestConfig:
__slots__ = ["rpc_backend_name"]
def __init__(self, *args, **kwargs):
assert len(args) == 0, "TestConfig only takes kwargs."
for k, v in kwargs.items():
setattr(self, k, v)
TEST_CONFIG = TestConfig(rpc_backend_name=getenv("RPC_BACKEND_NAME", "PROCESS_GROUP"))
INIT_METHOD_TEMPLATE = "file://{file_name}"
MASTER_RANK = 0
_ALL_NODE_NAMES = set()
_DONE_NODE_NAMES = set()
_TERMINATION_SIGNAL = threading.Event()
def on_master_follower_report_done(worker_name):
assert (
worker_name in _ALL_NODE_NAMES
), "{worker_name} is not expected by master.".format(worker_name=worker_name)
assert (
worker_name not in _DONE_NODE_NAMES
), "{worker_name} report done twice.".format(worker_name=worker_name)
_DONE_NODE_NAMES.add(worker_name)
if _ALL_NODE_NAMES != _DONE_NODE_NAMES:
return
set_termination_signal()
def set_termination_signal():
assert not _TERMINATION_SIGNAL.is_set(), "Termination signal got set twice."
_TERMINATION_SIGNAL.set()
def dist_init(old_test_method=None, setup_rpc=True, clean_shutdown=True):
"""
We use this decorator for setting up and tearing down state since
MultiProcessTestCase runs each `test*` method in a separate process and
each process just runs the `test*` method without actually calling
'setUp' and 'tearDown' methods of unittest.
"""
# If we use dist_init without arguments (ex: @dist_init), old_test_method is
# appropriately set and we return the wrapper appropriately. On the other
# hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)),
# old_test_method is None and we return a functools.partial which is the real
# decorator that is used and as a result we recursively call dist_init with
# old_test_method and the rest of the arguments appropriately set.
if old_test_method is None:
return partial(
dist_init,
setup_rpc=setup_rpc,
clean_shutdown=clean_shutdown,
)
@wraps(old_test_method)
def new_test_method(self, *arg, **kwargs):
self.worker_id = self.rank
self.worker_name_to_id = {
"worker{}".format(rank): rank for rank in range(self.world_size)
}
if setup_rpc:
global _ALL_NODE_NAMES
_ALL_NODE_NAMES = self.worker_name_to_id.keys()
# Use enough 'num_send_recv_threads' until we fix https://github.com/pytorch/pytorch/issues/26359
rpc.init_rpc(
self_name="worker%d" % self.rank,
backend=self.rpc_backend,
init_method=self.init_method,
self_rank=self.rank,
worker_name_to_id=self.worker_name_to_id,
num_send_recv_threads=16,
)
return_value = old_test_method(self, *arg, **kwargs)
if setup_rpc:
if clean_shutdown:
# Follower reports done.
if self.rank == MASTER_RANK:
on_master_follower_report_done("worker{}".format(MASTER_RANK))
else:
rpc.rpc_async(
"worker{}".format(MASTER_RANK),
on_master_follower_report_done,
args=("worker{}".format(self.rank),),
)
# Master waits for followers to report done.
# Follower waits for master's termination command.
_TERMINATION_SIGNAL.wait()
if self.rank == MASTER_RANK:
# Master sends termination command.
futs = []
for dst_rank in range(self.world_size):
# torch.distributed.rpc module does not support sending to self.
if dst_rank == MASTER_RANK:
continue
dst_name = "worker{}".format(dst_rank)
fut = rpc.rpc_async(dst_name, set_termination_signal, args=())
futs.append(fut)
for fut in futs:
assert fut.wait() is None, "Sending termination signal failed."
# Close RPC. Need to do this even if we don't have a clean shutdown
# since we need to shutdown the RPC agent. If we don't shutdown the
# RPC agent, tests would fail since RPC agent threads, locks and
# condition variables are not properly terminated.
rpc.join_rpc()
return return_value
return new_test_method
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
5648aac33570ad9b8a37e766c09a6bf0f2449f3d
|
ed11f664cbc459c7a4456dd58f2b231edcb22f33
|
/ctm_saas_client/models/variable_names.py
|
6dd582aa1ba63cf9a8ffd6bc9cc44f1eac2484e0
|
[
"BSD-3-Clause"
] |
permissive
|
jpmc216/ctm_python_client
|
c8b8ba60580bf869b3d1e6af9b99737e0a7ea527
|
de44e5012214ec42bb99b7f9b4ebc5394cd14328
|
refs/heads/main
| 2023-08-26T22:06:34.022576
| 2021-10-25T13:41:31
| 2021-10-25T13:41:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,614
|
py
|
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.30
Contact: customer_support@bmc.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_saas_client.configuration import Configuration
class VariableNames(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'variables': 'list[str]'
}
attribute_map = {
'variables': 'variables'
}
def __init__(self, variables=None, _configuration=None): # noqa: E501
"""VariableNames - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._variables = None
self.discriminator = None
if variables is not None:
self.variables = variables
@property
def variables(self):
"""Gets the variables of this VariableNames. # noqa: E501
Array of pool variables in format %%\\\\PoolName\\AUTOVarInPool. HIDDEN. # noqa: E501
:return: The variables of this VariableNames. # noqa: E501
:rtype: list[str]
"""
return self._variables
@variables.setter
def variables(self, variables):
"""Sets the variables of this VariableNames.
Array of pool variables in format %%\\\\PoolName\\AUTOVarInPool. HIDDEN. # noqa: E501
:param variables: The variables of this VariableNames. # noqa: E501
:type: list[str]
"""
self._variables = variables
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VariableNames, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VariableNames):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, VariableNames):
return True
return self.to_dict() != other.to_dict()
|
[
"cmoraes@bmc.com"
] |
cmoraes@bmc.com
|
f0bc1f5d3eaba5d03fdf3717b4edf8d85c9cf035
|
3649dce8b44c72bbfee56adf4e29ca6c5ba2703a
|
/code_up2721.py
|
95431018d2d23835e6f09154a0044a6cddea5ccd
|
[] |
no_license
|
beOk91/code_up
|
03c7aca76e955e3a59d797299749e7fc2457f24a
|
ca1042ce216cc0a80e9b3d3ad363bc29c4ed7690
|
refs/heads/master
| 2022-12-06T08:23:00.788315
| 2020-08-20T11:21:59
| 2020-08-20T11:21:59
| 284,844,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
num1=input()
num2=input()
num3=input()
if num1[len(num1)-1]==num2[0]:
if num2[len(num2)-1]==num3[0]:
if num3[len(num3)-1]==num1[0]:
print("good")
else:
print("bad")
else:
print("bad")
else:
print("bad")
|
[
"be_ok91@naver.com"
] |
be_ok91@naver.com
|
d91bc0db5f2379799c817125ffb8dd2f36f295e9
|
b81668a2cc43654cf6a3ed952d781310876838f9
|
/venv/Lib/site-packages/spacy/tests/regression/test_issue4924.py
|
b240f6d4a49a39413e8609221ea61fab8295918d
|
[] |
no_license
|
gowthamr1999/docbot-1
|
6a8b873407f15035fb8b30b69ed66ded343bd1e4
|
3119958d68e95673b4c9187d58d8cad5c18a6b2c
|
refs/heads/master
| 2023-04-07T02:16:55.574750
| 2021-04-16T02:52:38
| 2021-04-16T02:52:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
import pytest
from spacy.language import Language
def test_issue4924():
nlp = Language()
docs_golds = [("", {})]
nlp.evaluate(docs_golds)
|
[
"42891786+kiranm211@users.noreply.github.com"
] |
42891786+kiranm211@users.noreply.github.com
|