hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acffc3c4325c7c1e2ca199cc4221f5c4c74b9ede | 11,253 | py | Python | mltk/batch_agg.py | haowen-xu/ml-essentials | ca44186be37887461205227c32995f1485b4ff41 | [
"MIT"
] | 4 | 2019-08-06T03:23:14.000Z | 2019-11-08T10:58:54.000Z | mltk/batch_agg.py | haowen-xu/ml-essentials | ca44186be37887461205227c32995f1485b4ff41 | [
"MIT"
] | null | null | null | mltk/batch_agg.py | haowen-xu/ml-essentials | ca44186be37887461205227c32995f1485b4ff41 | [
"MIT"
] | 2 | 2019-12-03T08:09:05.000Z | 2020-10-15T06:50:20.000Z | import operator
from enum import Enum
from functools import reduce
from typing import *
import numpy as np
from .stage import StageType
from .utils import ALL, NOT_SET
__all__ = [
'BatchAggregationMode',
'BatchAggregator', 'BatchAggregatorDict',
]
class BatchAggregator(object):
    """
    Class to aggregate batch arrays.
    >>> agg = BatchAggregator(BatchAggregationMode.CONCAT)
    >>> agg
    BatchAggregator(mode=CONCAT, axis=0)
    >>> agg.add(np.array([1, 2, 3, 4]))
    >>> agg.add(np.array([5, 6]))
    >>> agg.get()
    array([1, 2, 3, 4, 5, 6])
    >>> agg = BatchAggregator(BatchAggregationMode.AVERAGE)
    >>> agg
    BatchAggregator(mode=AVERAGE, axis=None)
    >>> agg.add(np.array([1, 2, 3, 4]))
    >>> agg.add(np.array([5, 6]))
    >>> agg.get()
    3.5
    >>> agg = BatchAggregator(BatchAggregationMode.SUM)
    >>> agg
    BatchAggregator(mode=SUM, axis=None)
    >>> agg.add(np.array([1, 2, 3, 4]))
    >>> agg.add(np.array([5, 6]))
    >>> agg.get()
    21
    """
    # NOTE(review): this copy references `BatchAggregationMode`, which is not
    # defined in this copy of the file, and the doctest above shows a custom
    # repr although no `__repr__` is defined here -- this copy appears to be
    # machine-truncated; compare against the complete definition.
    # The aggregation strategy (CONCAT / SUM / AVERAGE).
    mode: BatchAggregationMode
    # Axis (or axes) to aggregate along; None means "over all elements".
    axis: Optional[Union[int, Tuple[int, ...]]]
    def __init__(self,
                 mode: Union[str, BatchAggregationMode],
                 axis: Optional[Union[int, Tuple[int, ...], List[int]]] = NOT_SET):
        """
        Construct a new :class:`BatchAggregator`.
        Args:
            mode: Aggregation mode.
            axis: The axis to aggregate. Defaults to `0` for `CONCAT` mode,
                while :obj:`None` for `SUM` and `AVERAGE` mode.
        """
        mode = BatchAggregationMode(mode)
        if axis is NOT_SET:
            # Mode-dependent default: concat along the batch axis, otherwise
            # reduce over all elements.
            axis = 0 if mode == BatchAggregationMode.CONCAT else None
        if mode == BatchAggregationMode.CONCAT:
            if not isinstance(axis, int):
                raise TypeError('`axis` must be a int when `mode` is CONCAT.')
        if axis is not None:
            if hasattr(axis, '__iter__'):
                # Normalize iterables to a tuple of ints; collapse a
                # single-element tuple to a plain int.
                axis = tuple(int(v) for v in axis)
                if len(axis) == 1:
                    axis = axis[0]
            else:
                axis = int(axis)
        self.mode = mode
        self.axis = axis
        self._buf = None       # list of batches for CONCAT, array otherwise
        self._weight_sum = 0.  # accumulated total weight (AVERAGE mode only)
    def get(self) -> Optional[np.ndarray]:
        """
        Get the aggregation result.
        Returns:
            The result, or :obj:`None` if no value has been collected.
        """
        if self._buf is not None:
            if self.mode == BatchAggregationMode.CONCAT:
                # Concatenation is deferred until the result is requested.
                return np.concatenate(self._buf, axis=self.axis)
            else:
                return self._buf
    def add(self,
            values: np.ndarray,
            weight: Optional[float] = 1.):
        """
        Add a batch array to the aggregator.
        Args:
            values: The batch array.
            weight: The batch weight, used only in `AVERAGE` mode.
        """
        # CONCAT: append the values to the buf
        if self.mode == BatchAggregationMode.CONCAT:
            if self._buf is None:
                self._buf = []
            self._buf.append(values)
        # SUM
        elif self.mode == BatchAggregationMode.SUM:
            batch_sum = np.sum(values, axis=self.axis)
            if self._buf is None:
                self._buf = batch_sum
            else:
                self._buf += batch_sum
        # AVERAGE: maintain the `total_weight` state and update the buf
        else:
            # infer the batch size and weight
            batch_shape = np.shape(values)
            if self.axis is None:
                batch_size = float(reduce(operator.mul, np.shape(values), 1.))
            elif isinstance(self.axis, tuple):
                batch_size = 1.
                for a in self.axis:
                    batch_size *= batch_shape[a]
            else:
                batch_size = batch_shape[self.axis]
            batch_weight = weight * batch_size
            # do update the weight
            self._weight_sum += batch_weight
            r1 = weight / self._weight_sum
            batch_sum = np.sum(values, axis=self.axis)
            if self._buf is None:
                self._buf = r1 * batch_sum
            else:
                r2 = batch_weight / self._weight_sum
                # Incremental weighted mean:
                #   buf_new = buf * (W_old / W_new) + weight * batch_sum / W_new
                # which equals buf + r1 * batch_sum - r2 * buf.
                self._buf += r1 * batch_sum - r2 * self._buf
class BatchAggregatorDict(Mapping[str, BatchAggregator]):
    """
    Maintain a dict of :class:`BatchAggregator` instances, maybe with
    a default factory to construct :class:`BatchAggregator` instance
    for new keys.
    >>> agg_dict = BatchAggregatorDict.new()
    >>> agg_dict['acc'].add(np.array([0.75, 0.875]))
    >>> agg_dict['loss'].add(np.array([0.125, 0.2]))
    >>> len(agg_dict)
    2
    >>> list(agg_dict)
    ['acc', 'loss']
    >>> agg_dict['acc'].get()
    0.8125
    >>> agg_dict['loss'].get()
    0.1625
    """
    # NOTE(review): the doctest above indexes and iterates `agg_dict`, but no
    # `get`/`__getitem__`/`__len__`/`__iter__` methods are defined in this
    # copy -- it appears machine-truncated; compare against the complete
    # definition.
    @staticmethod
    def new(metrics: Union[Sequence[str], type(ALL)] = ALL,
            outputs: Union[Sequence[str], type(ALL)] = (),
            aggregators: Optional[Mapping[str, BatchAggregator]] = None,
            excludes: Sequence[str] = (),
            stage_type: Optional[StageType] = None) -> 'BatchAggregatorDict':
        """
        Construct a new :class:`BatchAggregatorDict` according to the field
        settings `metrics`, `outputs` and `aggregators`.
        Args:
            metrics: The names of the batch arrays, which should be aggregated
                by ``BatchAggregator('AVERAGE', axis=None)``. :obj:`ALL`
                indicates that an array is by default a metric if it is neither
                specified in `outputs` nor in `aggregator`.
            outputs: The names of the batch arrays, which should be aggregated
                by ``BatchAggregator('CONCAT', axis=0)``. :obj:`ALL`
                indicates that an array is by default an output if it is neither
                specified in `outputs` nor in `aggregator`.
            aggregators: The dict of names and their corresponding aggregators.
            excludes: The names to exclude. If a name is excluded, no
                aggregator will be designated to this name, i.e., ``get(name)``
                returns None, and ``__getitem__(name)`` raises `KeyError`.
            stage_type: If specified, will add stage metric prefix to the keys
                of `metrics`, `outputs` and `aggregators`.
        Returns:
            The aggregator dict.
        Notes:
            :obj:`ALL` could be specified to at most one of `metrics`
            and `outputs`. The argument `aggregators` has higher priority
            than `outputs`, and so does `outputs` have higher priority than
            `metrics`. That is to say, if a name is specified in both
            `aggregators` and `outputs`, then the aggregator specified in
            `aggregators` will be chosen; this is also true if a name is
            specified in both `outputs` and `metrics`.
        """
        # the aggregator factories
        average_aggregator_factory = lambda: \
            BatchAggregator(mode=BatchAggregationMode.AVERAGE, axis=None)
        concat_aggregator_factory = lambda: \
            BatchAggregator(mode=BatchAggregationMode.CONCAT, axis=0)
        # determine the default factory
        if metrics == ALL and outputs == ALL:
            raise ValueError('Only one of `metrics` and `outputs` can be '
                             '`ALL`.')
        elif metrics == ALL:
            default_factory = average_aggregator_factory
        elif outputs == ALL:
            default_factory = concat_aggregator_factory
        else:
            default_factory = None
        # build the aggregator instances
        agg_dict = {}
        if metrics != ALL and metrics:
            for key in metrics:
                if stage_type is not None:
                    key = stage_type.add_metric_prefix(key)
                agg_dict[key] = average_aggregator_factory()
        if outputs != ALL and outputs:
            for key in outputs:
                if stage_type is not None:
                    key = stage_type.add_metric_prefix(key)
                agg_dict[key] = concat_aggregator_factory()
        if aggregators:
            # `aggregators` is applied last, hence overrides earlier entries.
            for key, agg in aggregators.items():
                if stage_type is not None:
                    key = stage_type.add_metric_prefix(key)
                agg_dict[key] = agg
        # build the excludes names
        if excludes and stage_type is not None:
            excludes = [stage_type.add_metric_prefix(n) for n in excludes]
        # now construct the `BatchAggregatorDict` instance
        return BatchAggregatorDict(
            agg_dict, excludes=excludes, default_factory=default_factory)
    def __init__(self,
                 aggregators: Mapping[str, BatchAggregator],
                 excludes: Sequence[str] = (),
                 default_factory: Optional[
                     Callable[[], BatchAggregator]] = None):
        """
        Construct a new :class:`BatchAggregatorDict`.
        Args:
            aggregators: The mapping from names to aggregators.
            excludes: The names to exclude from this dict. If a name is
                excluded, no aggregator will be designated to this name,
                i.e., ``get(name)`` returns None, and ``__getitem__(name)``
                raises :class:`KeyError`.
            default_factory: The default factory, which is used to create
                new :class:`BatchAggregator` instances if the aggregator
                to a requested name does not exist. If not specified,
                accessing non-existing name will raise an error.
        """
        self._aggregators = {}
        self._excludes = set(excludes or ())
        self._default_factory = default_factory
        for key in aggregators:
            # Excluded keys are silently dropped rather than rejected.
            if key not in self._excludes:
                agg = aggregators[key]
                if not isinstance(agg, BatchAggregator):
                    raise TypeError(f'Item {key!r} is not an instance of '
                                    f'{BatchAggregator.__qualname__}: '
                                    f'{agg!r}')
                self._aggregators[key] = agg
| 36.3 | 83 | 0.566071 | import operator
from enum import Enum
from functools import reduce
from typing import *
import numpy as np
from .stage import StageType
from .utils import ALL, NOT_SET
__all__ = [
'BatchAggregationMode',
'BatchAggregator', 'BatchAggregatorDict',
]
class BatchAggregationMode(str, Enum):
    """Strategy used by :class:`BatchAggregator` to combine batch arrays."""
    CONCAT = 'CONCAT'
    """To concat the batch arrays along specified axis."""
    SUM = 'SUM'
    """To sum the batch arrays along specified axis."""
    AVERAGE = 'AVERAGE'
    """To average the batch arrays along specified axis."""
class BatchAggregator(object):
    """
    Class to aggregate batch arrays.
    >>> agg = BatchAggregator(BatchAggregationMode.CONCAT)
    >>> agg
    BatchAggregator(mode=CONCAT, axis=0)
    >>> agg.add(np.array([1, 2, 3, 4]))
    >>> agg.add(np.array([5, 6]))
    >>> agg.get()
    array([1, 2, 3, 4, 5, 6])
    >>> agg = BatchAggregator(BatchAggregationMode.AVERAGE)
    >>> agg
    BatchAggregator(mode=AVERAGE, axis=None)
    >>> agg.add(np.array([1, 2, 3, 4]))
    >>> agg.add(np.array([5, 6]))
    >>> agg.get()
    3.5
    >>> agg = BatchAggregator(BatchAggregationMode.SUM)
    >>> agg
    BatchAggregator(mode=SUM, axis=None)
    >>> agg.add(np.array([1, 2, 3, 4]))
    >>> agg.add(np.array([5, 6]))
    >>> agg.get()
    21
    """
    # The aggregation strategy (CONCAT / SUM / AVERAGE).
    mode: BatchAggregationMode
    # Axis (or axes) to aggregate along; None means "over all elements".
    axis: Optional[Union[int, Tuple[int, ...]]]
    def __init__(self,
                 mode: Union[str, BatchAggregationMode],
                 axis: Optional[Union[int, Tuple[int, ...], List[int]]] = NOT_SET):
        """
        Construct a new :class:`BatchAggregator`.
        Args:
            mode: Aggregation mode.
            axis: The axis to aggregate. Defaults to `0` for `CONCAT` mode,
                while :obj:`None` for `SUM` and `AVERAGE` mode.
        """
        mode = BatchAggregationMode(mode)
        if axis is NOT_SET:
            # Mode-dependent default: concat along the batch axis, otherwise
            # reduce over all elements.
            axis = 0 if mode == BatchAggregationMode.CONCAT else None
        if mode == BatchAggregationMode.CONCAT:
            if not isinstance(axis, int):
                raise TypeError('`axis` must be a int when `mode` is CONCAT.')
        if axis is not None:
            if hasattr(axis, '__iter__'):
                # Normalize iterables to a tuple of ints; collapse a
                # single-element tuple to a plain int.
                axis = tuple(int(v) for v in axis)
                if len(axis) == 1:
                    axis = axis[0]
            else:
                axis = int(axis)
        self.mode = mode
        self.axis = axis
        self._buf = None       # list of batches for CONCAT, array otherwise
        self._weight_sum = 0.  # accumulated total weight (AVERAGE mode only)
    def __repr__(self):
        return f'{self.__class__.__qualname__}' \
               f'(mode={self.mode.value}, axis={self.axis})'
    def get(self) -> Optional[np.ndarray]:
        """
        Get the aggregation result.
        Returns:
            The result, or :obj:`None` if no value has been collected.
        """
        if self._buf is not None:
            if self.mode == BatchAggregationMode.CONCAT:
                # Concatenation is deferred until the result is requested.
                return np.concatenate(self._buf, axis=self.axis)
            else:
                return self._buf
    def add(self,
            values: np.ndarray,
            weight: Optional[float] = 1.):
        """
        Add a batch array to the aggregator.
        Args:
            values: The batch array.
            weight: The batch weight, used only in `AVERAGE` mode.
        """
        # CONCAT: append the values to the buf
        if self.mode == BatchAggregationMode.CONCAT:
            if self._buf is None:
                self._buf = []
            self._buf.append(values)
        # SUM
        elif self.mode == BatchAggregationMode.SUM:
            batch_sum = np.sum(values, axis=self.axis)
            if self._buf is None:
                self._buf = batch_sum
            else:
                self._buf += batch_sum
        # AVERAGE: maintain the `total_weight` state and update the buf
        else:
            # infer the batch size and weight
            batch_shape = np.shape(values)
            if self.axis is None:
                batch_size = float(reduce(operator.mul, np.shape(values), 1.))
            elif isinstance(self.axis, tuple):
                batch_size = 1.
                for a in self.axis:
                    batch_size *= batch_shape[a]
            else:
                batch_size = batch_shape[self.axis]
            batch_weight = weight * batch_size
            # do update the weight
            self._weight_sum += batch_weight
            r1 = weight / self._weight_sum
            batch_sum = np.sum(values, axis=self.axis)
            if self._buf is None:
                self._buf = r1 * batch_sum
            else:
                r2 = batch_weight / self._weight_sum
                # Incremental weighted mean:
                #   buf_new = buf * (W_old / W_new) + weight * batch_sum / W_new
                # which equals buf + r1 * batch_sum - r2 * buf.
                self._buf += r1 * batch_sum - r2 * self._buf
class BatchAggregatorDict(Mapping[str, BatchAggregator]):
    """
    Maintain a dict of :class:`BatchAggregator` instances, maybe with
    a default factory to construct :class:`BatchAggregator` instance
    for new keys.
    >>> agg_dict = BatchAggregatorDict.new()
    >>> agg_dict['acc'].add(np.array([0.75, 0.875]))
    >>> agg_dict['loss'].add(np.array([0.125, 0.2]))
    >>> len(agg_dict)
    2
    >>> list(agg_dict)
    ['acc', 'loss']
    >>> agg_dict['acc'].get()
    0.8125
    >>> agg_dict['loss'].get()
    0.1625
    """
    @staticmethod
    def new(metrics: Union[Sequence[str], type(ALL)] = ALL,
            outputs: Union[Sequence[str], type(ALL)] = (),
            aggregators: Optional[Mapping[str, BatchAggregator]] = None,
            excludes: Sequence[str] = (),
            stage_type: Optional[StageType] = None) -> 'BatchAggregatorDict':
        """
        Construct a new :class:`BatchAggregatorDict` according to the field
        settings `metrics`, `outputs` and `aggregators`.
        Args:
            metrics: The names of the batch arrays, which should be aggregated
                by ``BatchAggregator('AVERAGE', axis=None)``. :obj:`ALL`
                indicates that an array is by default a metric if it is neither
                specified in `outputs` nor in `aggregator`.
            outputs: The names of the batch arrays, which should be aggregated
                by ``BatchAggregator('CONCAT', axis=0)``. :obj:`ALL`
                indicates that an array is by default an output if it is neither
                specified in `outputs` nor in `aggregator`.
            aggregators: The dict of names and their corresponding aggregators.
            excludes: The names to exclude. If a name is excluded, no
                aggregator will be designated to this name, i.e., ``get(name)``
                returns None, and ``__getitem__(name)`` raises `KeyError`.
            stage_type: If specified, will add stage metric prefix to the keys
                of `metrics`, `outputs` and `aggregators`.
        Returns:
            The aggregator dict.
        Notes:
            :obj:`ALL` could be specified to at most one of `metrics`
            and `outputs`. The argument `aggregators` has higher priority
            than `outputs`, and so does `outputs` have higher priority than
            `metrics`. That is to say, if a name is specified in both
            `aggregators` and `outputs`, then the aggregator specified in
            `aggregators` will be chosen; this is also true if a name is
            specified in both `outputs` and `metrics`.
        """
        # the aggregator factories
        average_aggregator_factory = lambda: \
            BatchAggregator(mode=BatchAggregationMode.AVERAGE, axis=None)
        concat_aggregator_factory = lambda: \
            BatchAggregator(mode=BatchAggregationMode.CONCAT, axis=0)
        # determine the default factory
        if metrics == ALL and outputs == ALL:
            raise ValueError('Only one of `metrics` and `outputs` can be '
                             '`ALL`.')
        elif metrics == ALL:
            default_factory = average_aggregator_factory
        elif outputs == ALL:
            default_factory = concat_aggregator_factory
        else:
            default_factory = None
        # build the aggregator instances
        agg_dict = {}
        if metrics != ALL and metrics:
            for key in metrics:
                if stage_type is not None:
                    key = stage_type.add_metric_prefix(key)
                agg_dict[key] = average_aggregator_factory()
        if outputs != ALL and outputs:
            for key in outputs:
                if stage_type is not None:
                    key = stage_type.add_metric_prefix(key)
                agg_dict[key] = concat_aggregator_factory()
        if aggregators:
            # `aggregators` is applied last, hence overrides earlier entries.
            for key, agg in aggregators.items():
                if stage_type is not None:
                    key = stage_type.add_metric_prefix(key)
                agg_dict[key] = agg
        # build the excludes names
        if excludes and stage_type is not None:
            excludes = [stage_type.add_metric_prefix(n) for n in excludes]
        # now construct the `BatchAggregatorDict` instance
        return BatchAggregatorDict(
            agg_dict, excludes=excludes, default_factory=default_factory)
    def __init__(self,
                 aggregators: Mapping[str, BatchAggregator],
                 excludes: Sequence[str] = (),
                 default_factory: Optional[
                     Callable[[], BatchAggregator]] = None):
        """
        Construct a new :class:`BatchAggregatorDict`.
        Args:
            aggregators: The mapping from names to aggregators.
            excludes: The names to exclude from this dict. If a name is
                excluded, no aggregator will be designated to this name,
                i.e., ``get(name)`` returns None, and ``__getitem__(name)``
                raises :class:`KeyError`.
            default_factory: The default factory, which is used to create
                new :class:`BatchAggregator` instances if the aggregator
                to a requested name does not exist. If not specified,
                accessing non-existing name will raise an error.
        """
        self._aggregators = {}
        self._excludes = set(excludes or ())
        self._default_factory = default_factory
        for key in aggregators:
            # Excluded keys are silently dropped rather than rejected.
            if key not in self._excludes:
                agg = aggregators[key]
                if not isinstance(agg, BatchAggregator):
                    raise TypeError(f'Item {key!r} is not an instance of '
                                    f'{BatchAggregator.__qualname__}: '
                                    f'{agg!r}')
                self._aggregators[key] = agg
    def get(self, item: str, default: Any = None) -> Optional[BatchAggregator]:
        """
        Get (or lazily create via the default factory) the aggregator for
        `item`.  Note that an excluded name falls through the guard and
        returns :obj:`None` regardless of `default`.
        """
        if item not in self._excludes:
            if item not in self._aggregators:
                if self._default_factory is not None:
                    self._aggregators[item] = self._default_factory()
                else:
                    return default
            return self._aggregators[item]
    def __getitem__(self, item: str) -> BatchAggregator:
        """Like :meth:`get`, but raise :class:`KeyError` when unresolvable."""
        ret = self.get(item)
        if ret is None:
            raise KeyError(item)
        return ret
    def __len__(self) -> int:
        """Number of aggregators currently held (lazily created included)."""
        return len(self._aggregators)
    def __iter__(self) -> Iterator[str]:
        """Iterate over the names of the currently held aggregators."""
        return iter(self._aggregators)
| 704 | 257 | 158 |
d18d2231e33b1398489d8cb8ae750e5dfd0e3996 | 32 | py | Python | Exercicios - Mundo1/Ex001.py | BrianMath/ExerciciosPythonCeV | 4960f1a58d281b32afd5dfd6ea65e0ae5ad48b4f | [
"MIT"
] | null | null | null | Exercicios - Mundo1/Ex001.py | BrianMath/ExerciciosPythonCeV | 4960f1a58d281b32afd5dfd6ea65e0ae5ad48b4f | [
"MIT"
] | null | null | null | Exercicios - Mundo1/Ex001.py | BrianMath/ExerciciosPythonCeV | 4960f1a58d281b32afd5dfd6ea65e0ae5ad48b4f | [
"MIT"
] | null | null | null | # Ex. 001
print("Olá, Mundo!")
| 8 | 20 | 0.5625 | # Ex. 001
print("Olá, Mundo!")
| 0 | 0 | 0 |
d28049b5492eb31932111a9fa01bbadbb299b94d | 244 | py | Python | symposion_project/proposals/admin.py | pyconca/2013-web | 3d4169e39ae850a26cf19bef37a3f0ca36e67d7f | [
"BSD-3-Clause"
] | null | null | null | symposion_project/proposals/admin.py | pyconca/2013-web | 3d4169e39ae850a26cf19bef37a3f0ca36e67d7f | [
"BSD-3-Clause"
] | null | null | null | symposion_project/proposals/admin.py | pyconca/2013-web | 3d4169e39ae850a26cf19bef37a3f0ca36e67d7f | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from symposion_project.proposals.models import TalkProposal, TutorialProposal, LightningProposal
admin.site.register(TalkProposal)
admin.site.register(TutorialProposal)
admin.site.register(LightningProposal)
| 27.111111 | 96 | 0.864754 | from django.contrib import admin
from symposion_project.proposals.models import TalkProposal, TutorialProposal, LightningProposal
admin.site.register(TalkProposal)
admin.site.register(TutorialProposal)
admin.site.register(LightningProposal)
| 0 | 0 | 0 |
2cf4f647c737564aa1f5d633802604135bbaa2d3 | 444 | py | Python | blacklist/forms.py | milleruk/allianceauth-blacklist | cd58520f8362b5dd22f6249a89a829a6b79da61b | [
"MIT"
] | null | null | null | blacklist/forms.py | milleruk/allianceauth-blacklist | cd58520f8362b5dd22f6249a89a829a6b79da61b | [
"MIT"
] | null | null | null | blacklist/forms.py | milleruk/allianceauth-blacklist | cd58520f8362b5dd22f6249a89a829a6b79da61b | [
"MIT"
] | null | null | null | from django import forms
| 34.153846 | 71 | 0.759009 | from django import forms
class EveNoteForm(forms.Form):
    """Form collecting a free-text reason plus optional blacklist and
    restricted flags (both checkboxes default to unchecked)."""
    reason = forms.CharField(label='Reason', widget=forms.Textarea)
    blacklisted = forms.BooleanField(label='Blacklist', required=False)
    restricted = forms.BooleanField(label='Restricted', required=False)
class AddComment(forms.Form):
    """Form collecting a free-text comment plus an optional restricted flag."""
    comment = forms.CharField(label='Comment', widget=forms.Textarea)
    restricted = forms.BooleanField(label='Restricted', required=False)
| 0 | 371 | 46 |
d6e9732ca672a71c416021c2409f40951babaccc | 813 | py | Python | example/messaging/check-credit-usage.py | sourcery-ai-bot/kaleyra-python | 782333149e7f8673f383a5743f4ed06efb867d36 | [
"MIT"
] | 2 | 2020-11-21T18:10:46.000Z | 2021-09-21T19:28:03.000Z | example/messaging/check-credit-usage.py | sourcery-ai-bot/kaleyra-python | 782333149e7f8673f383a5743f4ed06efb867d36 | [
"MIT"
] | 3 | 2021-11-11T15:04:46.000Z | 2021-11-11T15:15:47.000Z | example/messaging/check-credit-usage.py | sourcery-ai-bot/kaleyra-python | 782333149e7f8673f383a5743f4ed06efb867d36 | [
"MIT"
] | 7 | 2019-08-05T18:46:22.000Z | 2021-11-15T16:49:34.000Z | #!/usr/bin/env python
from api.messaging.sms.sms_message_request import SMSMessageRequest
__author__ = "Likhit Jain and Yashita P Jain"
__copyright__ = "Copyright 2019, Kaleyra"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "support@kaleyra.com"
__status__ = "Production"
# User will be able to check account usage for a given period.
# from_date, to_date are mandatory parameters.
# Format of the date has to be specified.
smsMessageRequest = SMSMessageRequest(from_date='', to_date='', format='')
smsMessageResponse = smsMessageRequest.credit_usage()
print(smsMessageResponse.to_json())
print(smsMessageResponse.get_status())
print(smsMessageResponse.get_total_credits())
print(smsMessageResponse.get_total_sms())
print(smsMessageResponse.get_start_date())
print(smsMessageResponse.get_end_date())
| 30.111111 | 74 | 0.797048 | #!/usr/bin/env python
from api.messaging.sms.sms_message_request import SMSMessageRequest
__author__ = "Likhit Jain and Yashita P Jain"
__copyright__ = "Copyright 2019, Kaleyra"
__license__ = "MIT"
__version__ = "1.0"
__email__ = "support@kaleyra.com"
__status__ = "Production"
# User will be able to check account usage for a given period.
# from_date, to_date are mandatory parameters.
# Format of the date has to be specified.
smsMessageRequest = SMSMessageRequest(from_date='', to_date='', format='')
smsMessageResponse = smsMessageRequest.credit_usage()
print(smsMessageResponse.to_json())
print(smsMessageResponse.get_status())
print(smsMessageResponse.get_total_credits())
print(smsMessageResponse.get_total_sms())
print(smsMessageResponse.get_start_date())
print(smsMessageResponse.get_end_date())
| 0 | 0 | 0 |
f142c9575b6ac762f1f77e3c862d5fb6ba209f3a | 10,836 | py | Python | agent/src/Custom_JMX/yarn.py | ekbanasolutions/aditas | 2e8291202cb6781a1a0855a1cccb86348b710b71 | [
"MIT"
] | 2 | 2019-01-03T07:42:45.000Z | 2019-03-15T09:06:12.000Z | agent/src/Custom_JMX/yarn.py | ekbanatechnology/aditas | 2e8291202cb6781a1a0855a1cccb86348b710b71 | [
"MIT"
] | null | null | null | agent/src/Custom_JMX/yarn.py | ekbanatechnology/aditas | 2e8291202cb6781a1a0855a1cccb86348b710b71 | [
"MIT"
] | 2 | 2019-01-21T10:34:44.000Z | 2021-07-06T08:49:41.000Z | import ast
import json
import os
import re
import subprocess
import time
from multiprocessing.pool import ThreadPool
from Postgres_connection.connection import get_postgres_connection
from bigdata_logs.logger import getLoggingInstance
log = getLoggingInstance()
hadoop_bin=os.getenv("hadoop_bin_dir")
get_yarn_cmd = "%syarn node -all -list" % hadoop_bin
get_node_info = "%syarn node -status " % hadoop_bin
get_yarn_master_cmd = "%syarn rmadmin -getAllServiceState" % hadoop_bin
get_nm_address_cmd = "%shdfs getconf -confKey yarn.nodemanager.address" % hadoop_bin
get_host_ip = "getent hosts "
get_status_cmd = "lsof -t -i:"
cluster_id = 0
rpyc_port = 0
updated_at = ""
| 36.982935 | 160 | 0.457549 | import ast
import json
import os
import re
import subprocess
import time
from multiprocessing.pool import ThreadPool
from Postgres_connection.connection import get_postgres_connection
from bigdata_logs.logger import getLoggingInstance
log = getLoggingInstance()
hadoop_bin=os.getenv("hadoop_bin_dir")
get_yarn_cmd = "%syarn node -all -list" % hadoop_bin
get_node_info = "%syarn node -status " % hadoop_bin
get_yarn_master_cmd = "%syarn rmadmin -getAllServiceState" % hadoop_bin
get_nm_address_cmd = "%shdfs getconf -confKey yarn.nodemanager.address" % hadoop_bin
get_host_ip = "getent hosts "
get_status_cmd = "lsof -t -i:"
cluster_id = 0
rpyc_port = 0
updated_at = ""
def get_ip(host):
    """Resolve *host* to an IP address via ``getent hosts``.

    Returns the first whitespace-separated field of the lookup output
    (the address), or ``None`` if the command fails or returns nothing
    (the error is logged and swallowed).
    """
    try:
        # NOTE(review): shell=True with string concatenation -- `host` comes
        # from yarn CLI output here, but a list argv would be safer.
        cmd = get_host_ip + '%s' % host
        result = subprocess.check_output([cmd], stderr=subprocess.STDOUT, shell=True).decode("utf-8")
        result = result.split()
        host_ip = result[0]
        return host_ip
    except Exception as e:
        log.error(e)
def set_running_rpyc_port(result):
    """Record the IPC port of the first RUNNING node in the module-level
    ``rpyc_port`` global.

    Args:
        result: iterable of tab-separated ``yarn node -list`` rows of the
            form ``"host:ipc_port\\tSTATE\\t..."``.
    """
    for r in result:
        r = r.strip().split('\t')
        r = r[:-1]  # drop the last tab-separated column (unused here)
        ipc_port = r[0].strip().split(':')[1]
        status = r[1].strip()
        if status == "RUNNING":
            global rpyc_port
            rpyc_port = ipc_port
            break  # only the first RUNNING node is remembered
def get_yarn():
    """Poll loop: refresh YARN state in Postgres every 10 seconds.

    Each iteration updates the ResourceManager rows, then fans out one
    task per NodeManager row of ``yarn node -all -list`` to a thread pool
    (roughly one worker per 10 rows).  Loops forever; the first exception
    that escapes is logged and ends the loop.
    """
    try:
        while True:
            global updated_at
            updated_at = int(str(time.time()).split(".")[0])  # unix seconds
            # Updating Resource Manager JMX
            update_rm_info()
            # Updating Node Manager JMX
            i = 0
            result = subprocess.check_output(
                [get_yarn_cmd], stderr=subprocess.STDOUT, shell=True).decode("utf-8")
            result = result.strip().splitlines()
            # Skip everything up to and including the "Node-Id" header row.
            for index, value in enumerate(result):
                if value.__contains__("Node-Id"):
                    i = index + 1
                    break
            result = result[i:]
            # Remember the IPC port of the first RUNNING node (global rpyc_port).
            set_running_rpyc_port(result)
            no_of_threadpool = int(len(result) / 10) + 1
            pool = ThreadPool(no_of_threadpool)
            pool.map(yarn_jmx_update, result, 10)  # chunksize=10
            pool.close()
            time.sleep(10)
    except Exception as e:
        log.error(e)
def yarn_jmx_update(r):
    """Upsert one NodeManager row of ``yarn node -all -list`` output.

    The node whose IPC port equals the module-level ``rpyc_port`` (the
    first RUNNING node) gets detailed status fetched via
    ``yarn node -status`` and written by :func:`insert_update_yarn`;
    every other node is upserted as a stub row with zeroed metrics.
    Errors are logged and swallowed.

    Args:
        r: one tab-separated row, ``"host:ipc_port\\tstate\\thost:web_port\\t..."``.
    """
    cur = conn = None
    try:
        r = r.strip().split('\t')
        r = r[:-1]  # drop the last tab-separated column (unused here)
        host = r[0].strip().split(':')[0]
        ip = get_ip(host)
        ipc_port = r[0].strip().split(':')[1]
        status = r[1].strip()
        web_port = r[2].strip().split(':')[1]
        if ipc_port == rpyc_port:
            # Detailed path: fetch full node status and upsert with metrics.
            node_info = get_yarn_info(host, rpyc_port)
            node_info = ast.literal_eval(node_info)
            insert_update_yarn(ip, node_info, rpyc_port, web_port)
        else:
            # Stub path: upsert the node row and a zeroed metrics sample.
            # NOTE(review): SQL is built by string interpolation; values come
            # from yarn CLI output, but parameterized queries would be safer.
            conn = get_postgres_connection()
            cur = conn.cursor()
            sql = "select id from yarn_yarn where ip='%s' and cluster_id=%d and type=0" % (ip, cluster_id)
            cur.execute(sql)
            row = cur.fetchone()
            if row is None:
                sql = """INSERT INTO yarn_yarn (ip, type, status, state, web_port, rpyc_port,
                cluster_id, updated_at) VALUES
                ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(ip,
                                                                                     0,
                                                                                     status,
                                                                                     0,
                                                                                     web_port,
                                                                                     ipc_port,
                                                                                     cluster_id,
                                                                                     updated_at)
            else:
                # Updating yarn_yarn table with yarn jmx values.
                id = row[0]
                sql = """UPDATE yarn_yarn set type={0}, status='{1}', state={2}, updated_at={3} where id={4} RETURNING id;""" \
                    .format(0, status, 0, updated_at, id)
            yarn_id = execute_yarn_sql(sql)
            metrics_sql = """INSERT INTO yarn_metrics (cpu_capacity, cpu_used, last_health_update, memory_capacity, memory_used,
            rack, updated_at, node_id) VALUES({0}, {1}, '{2}', {3}, {4}, '{5}', {6}, {7}) RETURNING id;""".format(0, 0, 0, 0, 0, 0, updated_at, yarn_id)
            execute_yarn_sql(metrics_sql)
    except Exception as e:
        log.error(e)
        if cur is not None and conn is not None:
            cur.close()
            conn.close()
def insert_update_yarn(ip, node_info, rpyc_port, web_port):
    """Insert or update a NodeManager row in ``yarn_yarn`` and append a
    metrics sample to ``yarn_metrics``.

    Args:
        ip: resolved node IP (matched together with cluster_id and type=0).
        node_info: dict parsed from ``yarn node -status`` output, with keys
            such as "Node-State", "CPU-Capacity", "CPU-Used",
            "Memory-Capacity", "Memory-Used", "Last-Health-Update", "Rack".
        rpyc_port: node IPC port (used on insert).
        web_port: node HTTP port (used on insert).
    """
    cur = conn = None
    try:
        # NOTE(review): `type` shadows the builtin; harmless locally but
        # worth renaming.
        type = 0
        status = node_info.get("Node-State")
        state = 0
        # Numeric metrics are scraped out of human-readable strings,
        # e.g. "8 vcores" or "8192 MB".
        cpu_capacity = float((node_info.get("CPU-Capacity")).split()[0])
        cpu_used = float((node_info.get("CPU-Used")).split()[0])
        last_health_update = node_info.get("Last-Health-Update")
        memory_capacity = [float(s) for s in re.findall(r'-?\d+\.?\d*', str(node_info.get("Memory-Capacity")))][0]
        memory_used = [float(s) for s in re.findall(r'-?\d+\.?\d*', str(node_info.get("Memory-Used")))][0]
        rack = node_info.get("Rack")
        conn = get_postgres_connection()
        cur = conn.cursor()
        sql = "select id from yarn_yarn where ip='%s' and cluster_id=%d and type=0" % (ip, cluster_id)
        cur.execute(sql)
        row = cur.fetchone()
        if row is None:
            # Inserting yarn_yarn table with yarn jmx values.
            sql = """INSERT INTO yarn_yarn (ip, type, status, state, web_port, rpyc_port,
            cluster_id, updated_at) VALUES
            ('{0}', {1}, '{2}', {3}, {4}, {5}, {6}, {7}) RETURNING id;""".format(ip,
                                                                                 type,
                                                                                 status,
                                                                                 state,
                                                                                 web_port,
                                                                                 rpyc_port,
                                                                                 cluster_id,
                                                                                 updated_at)
        else:
            # Updating yarn_yarn table with yarn jmx values.
            id = row[0]
            sql = """UPDATE yarn_yarn set type={0}, status='{1}', state={2}, updated_at={3} where id={4} RETURNING id;""" \
                .format(type, status, state, updated_at, id)
        yarn_id = execute_yarn_sql(sql)
        metrics_sql = """INSERT INTO yarn_metrics (cpu_capacity, cpu_used, last_health_update, memory_capacity, memory_used,
        rack, updated_at, node_id) VALUES({0}, {1}, '{2}', {3}, {4}, '{5}', {6}, {7}) RETURNING id;""".format(cpu_capacity,
                                                                                                              cpu_used,
                                                                                                              last_health_update,
                                                                                                              memory_capacity,
                                                                                                              memory_used,
                                                                                                              rack,
                                                                                                              updated_at,
                                                                                                              yarn_id)
        execute_yarn_sql(metrics_sql)
    except Exception as e:
        log.error(e)
        if cur is not None and conn is not None:
            cur.close()
            conn.close()
def execute_yarn_sql(sql):
    """Execute a single SQL statement against Postgres and return its
    ``RETURNING`` value.

    Opens a fresh connection, executes *sql*, commits, and returns the
    first column of the first returned row.  Returns ``None`` if anything
    fails (the error is logged and swallowed).

    Args:
        sql: A complete SQL statement ending in a ``RETURNING ...`` clause.
    """
    cur = conn = None
    row_id = None
    try:
        conn = get_postgres_connection()
        cur = conn.cursor()
        cur.execute(sql)
        conn.commit()
        row_id = (cur.fetchone())[0]
    except Exception as e:
        log.error(e)
    finally:
        # Always release resources: previously they were closed only in the
        # error path, leaking one connection per successful call, and conn
        # leaked whenever cursor creation itself failed.
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
    return row_id
def get_yarn_info(host, port):
    """Fetch a node's ``yarn node -status host:port`` report as JSON text.

    Each remaining "Key : Value" line of the report is split on the first
    colon and collected into a dict, then serialized with ``json.dumps``.
    Returns ``None`` if the command or parsing fails (the error is logged).
    """
    try:
        node_info = {}
        cmd = get_node_info + "%s:%s | sed '1d'" % (host, port)
        result = subprocess.check_output(
            [cmd], stderr=subprocess.STDOUT, shell=True).decode("utf-8")
        result = result.strip().splitlines()
        # One header line already removed by sed '1d'; drop one more here.
        result = result[1:]
        for r in result:
            r = r.strip().split(':', 1)  # split on the first colon only
            key = r[0]
            value = r[1]
            key = key.strip()
            value = value.strip()
            node_info[key] = value
        return json.dumps(node_info)
    except Exception as e:
        log.error(e)
def get_rm_state(state):
    """Map a ResourceManager HA state string to its DB flag.

    ``'active'`` maps to ``1``; everything else (including ``'standby'``)
    maps to ``0``.
    """
    return 1 if state == 'active' else 0
def update_rm_info(updated_at=None):
    """Refresh the ResourceManager HA rows (type=1) in ``yarn_yarn``.

    Parses ``yarn rmadmin -getAllServiceState``, matches each RM host to
    its DB row by IP, and updates status ('RUNNING'/'SHUTDOWN'), the state
    flag and the timestamp.  Also captures the module-level ``cluster_id``
    global from the matched row.  Errors are logged and swallowed.

    Args:
        updated_at: unix-seconds timestamp; defaults to the current time.
    """
    cur = conn = None
    try:
        if updated_at is None:
            updated_at = int(str(time.time()).split(".")[0])
        result = subprocess.check_output(
            [get_yarn_master_cmd], stderr=subprocess.STDOUT, shell=True).decode("utf-8")
        result = result.strip().split("\n")
        # NOTE(review): list comprehension used purely for its side effect,
        # mutating `result` while enumerate() iterates it -- fragile if more
        # than one 'Retrying' line appears; verify.
        [result.remove(result[i]) for i, s in enumerate(result) if 'Retrying connect to server' in s]
        result = result[-2:]  # keep the last two lines: one per RM
        for r in result:
            r = r.strip().split(" ")
            host = ((r[0]).split(":")[0]).strip()
            state = (r[1]).strip()
            ip = get_ip(host)
            conn = get_postgres_connection()
            cur = conn.cursor()
            sql = "select id, cluster_id from yarn_yarn where ip='%s' and type=1" % ip
            cur.execute(sql)
            row = cur.fetchone()
            id = row[0]
            global cluster_id
            cluster_id = row[1]
            cur.close()
            conn.close()
            if state == "active" or state == "standby":
                status = "RUNNING"
            else:
                status = "SHUTDOWN"
            sql = """UPDATE yarn_yarn set status='{0}', state={1}, updated_at={2} WHERE id={3} RETURNING id;""". \
                format(status, get_rm_state(state),
                       updated_at,
                       id)
            execute_yarn_sql(sql)
    except Exception as e:
        log.error(e)
        if cur is not None and conn is not None:
            cur.close()
conn.close() | 9,948 | 0 | 207 |
4eea49f9fdd8f246e6899d9710f318998dc176a2 | 1,179 | py | Python | src/olympia/api/parsers.py | covariant/addons-server | 41e6ee9e426facb19a1e1ca8d40277cb6f94a7da | [
"BSD-3-Clause"
] | 843 | 2016-02-09T13:00:37.000Z | 2022-03-20T19:17:06.000Z | src/olympia/api/parsers.py | covariant/addons-server | 41e6ee9e426facb19a1e1ca8d40277cb6f94a7da | [
"BSD-3-Clause"
] | 10,187 | 2016-02-05T23:51:05.000Z | 2022-03-31T15:24:44.000Z | src/olympia/api/parsers.py | covariant/addons-server | 41e6ee9e426facb19a1e1ca8d40277cb6f94a7da | [
"BSD-3-Clause"
] | 551 | 2016-02-08T20:32:16.000Z | 2022-03-15T16:49:24.000Z | from rest_framework.parsers import DataAndFiles, MultiPartParser
class MultiPartParser(MultiPartParser):
    """
    Parser for multipart form data, which may include file data.
    Lifted from https://github.com/tomchristie/django-rest-framework/pull/4026/
    to work around request.data being empty when multipart/form-data is posted.
    See https://github.com/tomchristie/django-rest-framework/issues/3951
    """
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as a multipart encoded form,
        and returns a DataAndFiles object.
        `.data` will be a `QueryDict` containing all the form parameters.
        `.files` will be a `QueryDict` containing all the form files.
        For POSTs, accept Django request parsing. See issue #3951.
        """
        parser_context = parser_context or {}
        request = parser_context['request']
        # Underlying Django HttpRequest; its POST/FILES have already been
        # parsed by Django's own multipart machinery.
        _request = request._request
        if _request.method == 'POST':
            # Reuse Django's parse result instead of re-reading the stream,
            # which DRF may have already consumed.
            return DataAndFiles(_request.POST, _request.FILES)
        return super().parse(
            stream, media_type=media_type, parser_context=parser_context
        )
class MultiPartParser(MultiPartParser):
    """
    Parser for multipart form data, which may include file data.
    Lifted from https://github.com/tomchristie/django-rest-framework/pull/4026/
    to work around request.data being empty when multipart/form-data is posted.
    See https://github.com/tomchristie/django-rest-framework/issues/3951
    """
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parse the incoming bytestream as a multipart encoded form and
        return a DataAndFiles object (`.data`/`.files` are `QueryDict`s).

        For POSTs we hand back Django's own parse result (issue #3951);
        every other method falls through to the regular DRF parser.
        """
        ctx = parser_context or {}
        django_request = ctx['request']._request
        if django_request.method != 'POST':
            return super().parse(
                stream, media_type=media_type, parser_context=parser_context
            )
        return DataAndFiles(django_request.POST, django_request.FILES)
3e6384f78ff13e875854845743693ace589399d0 | 4,736 | py | Python | rule_surrogate/core/model_base.py | myaooo/rule-surrogate | 3f909062eef86419d86a9d8056521e9be519d537 | [
"MIT"
] | null | null | null | rule_surrogate/core/model_base.py | myaooo/rule-surrogate | 3f909062eef86419d86a9d8056521e9be519d537 | [
"MIT"
] | null | null | null | rule_surrogate/core/model_base.py | myaooo/rule-surrogate | 3f909062eef86419d86a9d8056521e9be519d537 | [
"MIT"
] | null | null | null | import dill as pickle
from typing import Optional, Union
from sklearn.base import ClassifierMixin, RegressorMixin
from sklearn.metrics import log_loss, accuracy_score, mean_squared_error, r2_score
from rule_surrogate import Config
from rule_surrogate.utils.io_utils import get_path, obj2pkl, assert_file_exists
from rule_surrogate.core.metrics import auc_score
FILE_EXTENSION = '.mdl'
CLASSIFICATION = 'classification'
REGRESSION = 'regression'
class SKModelWrapper(ModelBase):
"""A wrapper that wraps models in Sklearn"""
@property
@property
# def score(self, y_true, y_pred):
# raise NotImplementedError("This is the SKModelWrapper base class!")
| 26.909091 | 93 | 0.629856 | import dill as pickle
from typing import Optional, Union
from sklearn.base import ClassifierMixin, RegressorMixin
from sklearn.metrics import log_loss, accuracy_score, mean_squared_error, r2_score
from rule_surrogate import Config
from rule_surrogate.utils.io_utils import get_path, obj2pkl, assert_file_exists
from rule_surrogate.core.metrics import auc_score
FILE_EXTENSION = '.mdl'
CLASSIFICATION = 'classification'
REGRESSION = 'regression'
def _format_name(name):
    """Build the on-disk path for a model called *name* under the model dir."""
    filename = "{}{}".format(name, FILE_EXTENSION)
    return get_path(Config.model_dir(), filename)
class ModelInterface:
    """Minimal interface that every model implementation must provide."""
    def predict(self, x):
        # Abstract: subclasses return predictions for input batch ``x``.
        raise NotImplementedError("Interface class")
class ModelBase(ModelInterface):
    """Common base for all models: naming, train/test hooks, persistence."""
    def __init__(self, name):
        self.name = name
    @property
    def type(self):
        # Short string identifying the model family; subclasses override.
        raise NotImplementedError("Base class")
    def train(self, x, y, **kwargs):
        raise NotImplementedError("Base class")
    def test(self, x, y):
        """
        Evaluate the model on held-out data.
        :param x:
        :param y:
        :return: accuracy
        """
        return self.evaluate(x, y, stage='test')
    def evaluate(self, x, y, stage='train'):
        raise NotImplementedError("Base class")
    # def predict_prob(self, x):
    #     raise NotImplementedError("Base class")
    def predict(self, x):
        raise NotImplementedError("Base class")
    def score(self, y_true, y_pred):
        raise NotImplementedError("Base class")
    def save(self, filename=None):
        """Pickle this model to *filename* (defaults to the standard path)."""
        if filename is None:
            filename = _format_name(self.name)
        obj2pkl(self, filename)
    @classmethod
    def load(cls, filename):
        """Load a pickled model and verify it is an instance of *cls*."""
        mdl = load_model(filename)
        if isinstance(mdl, cls):
            return mdl
        else:
            # Bug fix: the message was hard-coded to "Tree model" no matter
            # which subclass called load(); report the actual expected class.
            raise RuntimeError(
                "The loaded file is not a {} model!".format(cls.__name__))
def load_model(filename: str) -> ModelBase:
    """Unpickle a model object from *filename*.

    NOTE(review): this deserializes with dill/pickle, which can execute
    arbitrary code — only load files from trusted sources.
    """
    assert_file_exists(filename)
    with open(filename, "rb") as f:
        mdl = pickle.load(f)
    # assert isinstance(mdl, ModelBase)
    return mdl
class SKModelWrapper(ModelBase):
"""A wrapper that wraps models in Sklearn"""
def __init__(self, problem=CLASSIFICATION, name='wrapper'):
super(SKModelWrapper, self).__init__(name=name)
self._problem = problem
self._model = None # type: Optional[Union[RegressorMixin, ClassifierMixin]]
@property
def type(self):
return "sk-model-wrapper"
@property
def model(self):
return self._model
# raise NotImplementedError("This is the SKModelWrapper base class!")
def train(self, x, y, **kwargs):
self.model.fit(x, y)
# self.evaluate(x, y, stage='train')
def predict_prob(self, x):
assert self._problem == CLASSIFICATION
return self.model.predict_proba(x)
def predict(self, x):
return self.model.predict(x)
# def score(self, y_true, y_pred):
# raise NotImplementedError("This is the SKModelWrapper base class!")
class Classifier(ModelBase):
    """Base class for classification models: adds accuracy/log-loss/AUC metrics."""
    @property
    def type(self):
        return 'classifier'
    # def train(self, x, y):
    #     raise NotImplementedError("This is the classifier base class")
    def evaluate(self, x, y, stage='train'):
        """Compute accuracy, log-loss and macro AUC on (x, y), print and return them."""
        acc = self.accuracy(y, self.predict(x))
        loss = self.log_loss(y, self.predict_prob(x))
        auc = auc_score(y, self.predict_prob(x), average='macro')
        prefix = 'Testing' if stage == 'test' else 'Training'
        print(prefix + " accuracy: {:.5f}; loss: {:.5f}; auc: {:.5f}".format(acc, loss, auc))
        return acc, loss, auc
    def predict_prob(self, x):
        # Subclasses must provide class probabilities.
        raise NotImplementedError("This is the classifier base class!")
    def score(self, y_true, y_pred):
        """Default classifier score is plain accuracy."""
        return self.accuracy(y_true, y_pred)
    @staticmethod
    def log_loss(y_true, y_prob):
        # print(y_true.max())
        # Pass the full label range so missing classes in y_true don't break it.
        return log_loss(y_true, y_prob, labels=list(range(y_prob.shape[1])))
    @staticmethod
    def accuracy(y_true, y_pred):
        return accuracy_score(y_true, y_pred)
class Regressor(ModelBase):
    """Base class for regression models: adds MSE/R2 metrics."""
    @property
    def type(self):
        return 'regressor'
    def evaluate(self, x, y, stage='train'):
        """
        Compute mean squared error on (x, y), print and return it.
        :param x:
        :param y:
        :return: accuracy
        """
        s = self.mse(y, self.predict(x))
        prefix = 'Testing' if stage == 'test' else 'Training'
        print(prefix + " mse: {:.5f}".format(s))
        return s
    def score(self, y_true, y_pred):
        """Default regressor score is mean squared error."""
        return self.mse(y_true, y_pred)
    @staticmethod
    def mse(y_true, y_pred):
        return mean_squared_error(y_true, y_pred)
    @staticmethod
    def r2(y_true, y_pred):
        return r2_score(y_true, y_pred)
8f62197fa70260e7332679cdc42f33f42b283aef | 1,835 | py | Python | keygen/generator.py | encode1/keygen-api | 84a69c40766f68daee866b5c305b6a0b1c99f056 | [
"Apache-2.0"
] | null | null | null | keygen/generator.py | encode1/keygen-api | 84a69c40766f68daee866b5c305b6a0b1c99f056 | [
"Apache-2.0"
] | 6 | 2021-03-19T00:40:57.000Z | 2021-09-22T18:41:41.000Z | keygen/generator.py | encode1/keygen-api | 84a69c40766f68daee866b5c305b6a0b1c99f056 | [
"Apache-2.0"
] | null | null | null | """
KeyGenerator uses the RSA keys to encrypt the email
"""
import os
import re
import rsa
import pickle
from base64 import b64encode, b64decode
| 28.230769 | 80 | 0.53406 | """
KeyGenerator uses the RSA keys to encrypt the email
"""
import os
import re
import rsa
import pickle
from base64 import b64encode, b64decode
class KeyGenerator(object):
    """Generates and validates license keys from an RSA signature of an email.

    The RSA key pair is supplied as base64-encoded pickles through the
    ``PUB_KEY`` / ``PRIV_KEY`` environment variables (read at import time).
    """
    _pubkey = pickle.loads(b64decode(os.getenv('PUB_KEY')))
    _privkey = pickle.loads(b64decode(os.getenv('PRIV_KEY')))
    def __init__(self, email):
        # Bug fix: removed leftover debug prints that dumped the key pair
        # (including the PRIVATE key) to stdout on every instantiation.
        self.email = email
    def _validate_email(self):
        """
        validates email format using regex
        :return Boolean:
        """
        email_regex = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"
        # Bug fix: return an explicit boolean instead of True-or-None.
        return re.search(email_regex, self.email) is not None
    def generate(self):
        """
        Generate the key by signing key with a private Key
        :return: str
        """
        if not self._validate_email():
            return False
        key = ''
        chunk = ''
        # Only these characters from the base64 signature are kept.
        seq = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
        signature = rsa.sign(self.email.encode('utf-8'), self._privkey, 'SHA-1')
        seed = b64encode(signature).decode('ascii')
        for char in seed:
            if char not in seq:
                continue
            key += char
            chunk += char
            if len(chunk) == 4:
                # Group key characters as XXXX-XXXX-...
                key += '-'
                chunk = ''
            if len(key) == 25:
                # Five groups built; drop the trailing dash and stop.
                key = key[:-1]
                break
        return key
    def validate(self, key):  # Todo Needs to be refactored
        """Verify *key* as a base64 RSA signature of this instance's email."""
        if not self._validate_email():
            return False
        signature = b64decode(key)
        try:
            rsa.verify(self.email.encode('utf-8'), signature, self._pubkey)
        except rsa.VerificationError:
            return '{0} : Invalid'.format(key)
        else:
            return '{0} : valid'.format(key)
6f197ce0a20b9f48a593503aafee8223eb8d7826 | 2,998 | py | Python | site-packages/cinderclient/tests/unit/fixture_data/availability_zones.py | hariza17/freezer_libraries | e0bd890eba5e7438976fb3b4d66c41c128bab790 | [
"PSF-2.0"
] | 74 | 2015-01-29T20:10:47.000Z | 2022-03-03T05:09:00.000Z | site-packages/cinderclient/tests/unit/fixture_data/availability_zones.py | hariza17/freezer_libraries | e0bd890eba5e7438976fb3b4d66c41c128bab790 | [
"PSF-2.0"
] | 6 | 2015-08-10T10:23:42.000Z | 2022-02-16T02:28:22.000Z | site-packages/cinderclient/tests/unit/fixture_data/availability_zones.py | hariza17/freezer_libraries | e0bd890eba5e7438976fb3b4d66c41c128bab790 | [
"PSF-2.0"
] | 125 | 2015-02-24T11:04:51.000Z | 2021-12-23T01:28:05.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from cinderclient.tests.unit.fixture_data import base
# FIXME(jamielennox): use timeutils from oslo
FORMAT = '%Y-%m-%d %H:%M:%S'
REQUEST_ID = 'req-test-request-id'
| 33.685393 | 75 | 0.442628 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from cinderclient.tests.unit.fixture_data import base
# FIXME(jamielennox): use timeutils from oslo
FORMAT = '%Y-%m-%d %H:%M:%S'
REQUEST_ID = 'req-test-request-id'
class Fixture(base.Fixture):
    """Registers canned os-availability-zone API responses on the mock requests layer."""
    base_url = 'os-availability-zone'
    def setUp(self):
        super(Fixture, self).setUp()
        # Summary listing: zone names and availability only, no host detail.
        get_availability = {
            "availabilityZoneInfo": [
                {
                    "zoneName": "zone-1",
                    "zoneState": {"available": True},
                    "hosts": None,
                },
                {
                    "zoneName": "zone-2",
                    "zoneState": {"available": False},
                    "hosts": None,
                },
            ]
        }
        self.requests.register_uri(
            'GET', self.url(), json=get_availability,
            headers={'x-openstack-request-id': REQUEST_ID}
        )
        updated_1 = datetime(2012, 12, 26, 14, 45, 25, 0).strftime(FORMAT)
        updated_2 = datetime(2012, 12, 26, 14, 45, 24, 0).strftime(FORMAT)
        # Detailed listing: includes per-host service state and timestamps.
        get_detail = {
            "availabilityZoneInfo": [
                {
                    "zoneName": "zone-1",
                    "zoneState": {"available": True},
                    "hosts": {
                        "fake_host-1": {
                            "cinder-volume": {
                                "active": True,
                                "available": True,
                                "updated_at": updated_1,
                            }
                        }
                    }
                },
                {
                    "zoneName": "internal",
                    "zoneState": {"available": True},
                    "hosts": {
                        "fake_host-1": {
                            "cinder-sched": {
                                "active": True,
                                "available": True,
                                "updated_at": updated_2,
                            }
                        }
                    }
                },
                {
                    "zoneName": "zone-2",
                    "zoneState": {"available": False},
                    "hosts": None,
                },
            ]
        }
        self.requests.register_uri(
            'GET', self.url('detail'), json=get_detail,
            headers={'x-openstack-request-id': REQUEST_ID}
        )
27c38e84ddccbff6b7a91df729523fc4d21dc45d | 2,755 | py | Python | imgurpx.py | andrewpx1/apiai-weather-webhook-sample-master | 4cdd28f8d6a8a3fa5d303a163bfdfd24bdb19a80 | [
"Apache-2.0"
] | null | null | null | imgurpx.py | andrewpx1/apiai-weather-webhook-sample-master | 4cdd28f8d6a8a3fa5d303a163bfdfd24bdb19a80 | [
"Apache-2.0"
] | null | null | null | imgurpx.py | andrewpx1/apiai-weather-webhook-sample-master | 4cdd28f8d6a8a3fa5d303a163bfdfd24bdb19a80 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from os.path import splitext
from re import findall
from random import randint
import json
import os
import requests
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print("Starting app on port %d" % port)
app.run(debug=False, port=port, host='0.0.0.0')
| 22.04 | 58 | 0.564428 | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from os.path import splitext
from re import findall
from random import randint
import json
import os
import requests
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
def scheck(easi):
    """Pick a random result index given *easi* matches, capped at 60.

    Fewer than two matches always yields index 0.
    """
    if easi < 2:
        return 0
    return randint(0, min(easi, 60))
def processRequest(req):
    """Handle an api.ai webhook request: search imgur for the spoken query
    and return a webhook response pointing at a random matching image.

    NOTE(review): this scrapes imgur's HTML with regexes; it will break if
    the markup changes. Assumes req follows the api.ai v1 webhook schema
    (result.parameters.any holds the search text) — confirm with the agent.
    """
    result = req.get("result")
    parameters = result.get("parameters")
    prt = parameters.get("any")
    baseurl = "http://imgur.com/search/score?"
    mydict = {'q': prt}
    put = urlencode(mydict)
    url = baseurl + put
    headers = {}
    a = requests.get(url, headers=headers)
    b = a.content
    c = b.decode()
    # Number of matches, e.g. 'Found <i>1,234</i>' — strip the thousands comma.
    d = '<span class="sorting-text-align">Found <i>(.+?)<'
    e = findall(d,c)
    eas = e[0].replace("," , "")
    easi = int(eas)
    # Random index into the gallery links found on the results page.
    h1 = scheck(easi)
    h41 = 'href="/gallery/(.+?)"'
    h51 = findall(h41,c)
    h61 = h51[h1]
    nurl = 'http://imgur.com/gallery/' + h61
    a10 = requests.get(nurl, headers=headers)
    h7 = a10.content
    hdec1 = h7.decode()
    # First direct-image URL embedded in the gallery page.
    h711 = 'src="//i.imgur.com/(.+?)"'
    h721 = findall(h711,hdec1)
    h73 = h721[0]
    h74 = 'http://i.imgur.com/' + h73
    h75 = '{"file":"' + h74 + '"}'
    data = json.loads(h75)
    res = makeWebhookResult(data)
    return res
def makeWebhookResult(data):
    """Build the api.ai webhook response for an image URL.

    :param data: dict with a 'file' key holding the direct image URL.
    :return: webhook response dict with speech text and a Kik message payload.
    """
    joke = data.get('file')
    # print(json.dumps(item, indent=4))
    speech = joke
    print("Response:")
    print(speech)
    # The .gif and .mp4 cases were two byte-identical branches; merged here.
    if getext(joke) in (".gif", ".mp4"):
        kik_message = [
            {
                "type": "video",
                "videoUrl": speech
            }
        ]
    else:
        kik_message = [
            {
                "type": "picture",
                "picUrl": speech
            }
        ]
    print(json.dumps(kik_message))
    return {
        "speech": speech,
        "displayText": speech,
        "data": {"kik": kik_message},
        # "contextOut": [],
        "source": "apiai-weather-webhook-sample"
    }
def getext(joke):
    """Return the file extension (including the dot) of a URL's path component."""
    path = urlparse(joke).path
    _, extension = splitext(path)
    return extension
if __name__ == '__main__':
    # PORT is provided by the hosting platform (e.g. Heroku); default to 5000 locally.
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
| 1,967 | 0 | 93 |
551c6d3d7d291081a3aabf8a1d4a00b4453d6d37 | 3,169 | py | Python | nn.py | MANU-CHAUHAN/minigrad | a71cdf505974b9d2fb5e1730dcc483ec42e92c17 | [
"MIT"
] | null | null | null | nn.py | MANU-CHAUHAN/minigrad | a71cdf505974b9d2fb5e1730dcc483ec42e92c17 | [
"MIT"
] | null | null | null | nn.py | MANU-CHAUHAN/minigrad | a71cdf505974b9d2fb5e1730dcc483ec42e92c17 | [
"MIT"
] | null | null | null | import random
from engine import Scalar
class Module:
''' base class'''
def zero_grad(self):
'''Zero out all the gradients to clear out accumulated gradients from previous loss. (before backprop)'''
class Neuron(Module):
''' A single node of computation '''
def __init__(self, n_in, non_linear=True):
''' Randomly initialize weights using `random.uniform` '''
self.weights = [Scalar(random.uniform(-1, 1)) for _ in n_in]
self.bias = Scalar(0)
self.non_linear = non_linear
def parameters(self):
''' Get all parameters '''
return self.weights + [self.bias]
class Layer(Module):
''' Class representing single layer in a Neural Network '''
class MLP(Module):
''' A simple feed forward Mutli Layer Perceptron class '''
def __call__(self, x):
''' This is the forward propagation with input x '''
for layer in self.layers:
x = layer(x)
return x | 37.72619 | 221 | 0.643421 | import random
from engine import Scalar
class Module:
    ''' base class'''
    def zero_grad(self):
        '''Zero out all the gradients to clear out accumulated gradients from previous loss. (before backprop)'''
        # Bug fix: zero_grad was defined twice — a docstring-only stub that was
        # immediately shadowed by the real body; merged into one definition.
        for param in self.parameters():
            param.grad = 0
    def parameters(self):
        '''Return the trainable parameters; subclasses override this.'''
        return []
class Neuron(Module):
    ''' A single node of computation '''
    def __init__(self, n_in, non_linear=True):
        ''' Randomly initialize weights using `random.uniform`.

        :param n_in: number of incoming connections (an int).
        :param non_linear: apply ReLU when True, otherwise emit raw logits.
        '''
        # Bug fix: `for _ in n_in` iterated over an int (TypeError) — we need
        # one weight per input, i.e. range(n_in).
        self.weights = [Scalar(random.uniform(-1, 1)) for _ in range(n_in)]
        self.bias = Scalar(0)
        self.non_linear = non_linear
    def __call__(self, x):
        # simple element-wise multiplication and then sum => `dot-product`
        output = sum(w_i * x_i for w_i, x_i in zip(self.weights, x))
        output = output + self.bias  # add bias term
        y = output.relu() if self.non_linear else output  # ReLU if non-linear, else logit
        return y
    def __repr__(self):
        return f"{'ReLU' if self.non_linear else 'Linear'} Neuron({len(self.weights)})"
    def parameters(self):
        ''' Get all parameters '''
        return self.weights + [self.bias]
class Layer(Module):
    ''' Class representing single layer in a Neural Network '''
    def __init__(self, n_in, n_out, **kwargs):
        # Fully connected: each of the n_out neurons sees all n_in inputs.
        self.neurons = [Neuron(n_in=n_in, **kwargs) for _ in range(n_out)]
    def __call__(self, x):
        # Bug fix: the comprehension iterated with `_` but referenced `neuron`
        # (NameError); feed x to every neuron in this layer.
        out = [neuron(x) for neuron in self.neurons]
        # Unwrap single-output layers for convenience.
        return out[0] if len(out) == 1 else out
    def __repr__(self):
        return f"Layer([{','.join(str(neuron) for neuron in self.neurons)}])"
    def parameters(self):
        return [parameter for neuron in self.neurons for parameter in neuron.parameters()]
class MLP(Module):
    ''' A simple feed forward Mutli Layer Perceptron class '''
    def __init__(self, n_in, hidden_units):
        '''
        :param n_in: number of input features.
        :param hidden_units: sequence of layer sizes (last entry = output size).
        '''
        assert isinstance(hidden_units, (list, tuple)), 'Please pass the sequence depicting the hidden units for layers in MLP in a list/tuple'
        # Bug fix: `[n_in] + hidden_units` raised TypeError for tuples even
        # though the assert above admits them; normalise to a list first.
        total_size_seq = [n_in] + list(hidden_units)
        self.layers = []
        for i in range(len(hidden_units)):
            # Hidden layers use ReLU; the final layer emits raw logits.
            non_linear_flag = True if i != len(hidden_units) - 1 else False
            self.layers.append(Layer(n_in=total_size_seq[i], n_out=total_size_seq[i + 1], non_linear=non_linear_flag))
    def __call__(self, x):
        ''' This is the forward propagation with input x '''
        for layer in self.layers:
            x = layer(x)
        return x
    def __repr__(self):
        return f"MLP ([{','.join(str(layer) for layer in self.layers)}])"
    def parameters(self):
        return [parameters for layer in self.layers for parameters in layer.parameters()]
3d77627e7314207b9f89032fbd647f63aa3aebb9 | 4,866 | py | Python | model_api/model_training/next_framework/tests/training_util_functions_test.py | INK-USC/LEAN-LIFE | e3d6debc3e4c41145ef1c03236c4cf57bfd8be7d | [
"MIT"
] | 21 | 2020-09-29T12:45:50.000Z | 2022-03-27T13:11:12.000Z | model_api/model_training/next_framework/tests/training_util_functions_test.py | INK-USC/LEAN-LIFE | e3d6debc3e4c41145ef1c03236c4cf57bfd8be7d | [
"MIT"
] | 3 | 2020-12-03T10:34:54.000Z | 2021-03-29T09:01:05.000Z | model_api/model_training/next_framework/tests/training_util_functions_test.py | INK-USC/LEAN-LIFE | e3d6debc3e4c41145ef1c03236c4cf57bfd8be7d | [
"MIT"
] | 3 | 2021-02-14T08:39:02.000Z | 2021-07-29T02:33:14.000Z | import sys
sys.path.append("../")
import random
import training.next_util_functions as func
random_state = 42
random.seed(random_state)
# assert ['not'] == func.extract_queries_from_explanations(text)
# text = "Finally we also handle `backticks as quotes`"
# assert ["backticks as quotes"] == func.extract_queries_from_explanations(text)
# text = "No quotes here though, so should be empty"
# assert [] == func.extract_queries_from_explanations(text) | 39.560976 | 123 | 0.601315 | import sys
sys.path.append("../")
import random
import training.next_util_functions as func
random_state = 42
random.seed(random_state)
def test_build_custom_vocab():
    """build_custom_vocab should map TACRED entity-marker tokens to ids starting at the given offset (10)."""
    custom_vocab = func.build_custom_vocab("tacred", 10)
    actual_custom_vocab = {
        'SUBJ-PERSON': 10, 'OBJ-PERSON': 11, 'SUBJ-ORGANIZATION': 12, 'OBJ-ORGANIZATION': 13, 'SUBJ-DATE': 14,
        'OBJ-DATE': 15, 'SUBJ-NUMBER': 16, 'OBJ-NUMBER': 17, 'SUBJ-TITLE': 18, 'OBJ-TITLE': 19,
        'SUBJ-COUNTRY': 20, 'OBJ-COUNTRY': 21, 'SUBJ-LOCATION': 22, 'OBJ-LOCATION': 23, 'SUBJ-CITY': 24,
        'OBJ-CITY': 25, 'SUBJ-MISC': 26, 'OBJ-MISC': 27, 'SUBJ-STATE_OR_PROVINCE': 28, 'OBJ-STATE_OR_PROVINCE': 29,
        'SUBJ-DURATION': 30, 'OBJ-DURATION': 31, 'SUBJ-NATIONALITY': 32, 'OBJ-NATIONALITY': 33,
        'SUBJ-CAUSE_OF_DEATH': 34, 'OBJ-CAUSE_OF_DEATH': 35, 'SUBJ-CRIMINAL_CHARGE': 36, 'OBJ-CRIMINAL_CHARGE': 37,
        'SUBJ-RELIGION': 38, 'OBJ-RELIGION': 39, 'SUBJ-URL': 40, 'OBJ-URL': 41, 'SUBJ-IDEOLOGY': 42,
        'OBJ-IDEOLOGY': 43
    }
    assert custom_vocab == actual_custom_vocab
def test_find_array_start_position():
    """find_array_start_position should return the index where the sub-array first begins."""
    big_array = [1,2,3,4,4,5,6,67,78]
    small_array = [4,4,5,6]
    assert func.find_array_start_position(big_array, small_array) == 3
def test_tokenize():
    """tokenize lower-cases and splits contractions, but preserves SUBJ-/OBJ- entity markers verbatim."""
    text = "HEY I've got some funKy things! \nIsn't it Funny!! "
    tokens = ['hey', 'i', "'ve", 'got', 'some', 'funky', 'things', '!', 'is', "n't", 'it', 'funny', '!', '!']
    assert func.tokenize(text) == tokens
    text = "SUBJ-PERSON is my friend, they are a OBJ-OCCUPATION down the street."
    tokens = ['SUBJ-PERSON', 'is', 'my', 'friend', ',', 'they', 'are', 'a', 'OBJ-OCCUPATION', 'down', 'the', 'street', '.']
    assert func.tokenize(text) == tokens
def test_build_vocab():
    """build_vocab should produce a torchtext Vocab with specials first, then tokens by frequency/alphabetical order."""
    train = ["Here are some strings",
             "Some not so interesting strings!",
             "Let's make ThEm MORe Intersting?",
             "Yes you can :)",
             "Coolio <::>"]
    embedding_name = "glove.6B.50d"
    vocab = func.build_vocab(train, embedding_name, save=False)
    tokens_in_order = ['<unk>', '<pad>', ':', 'some', 'strings', '!', "'s", ':)', '<', '>',
                       '?', 'are', 'can', 'coolio', 'here', 'interesting', 'intersting', 'let', 'make',
                       'more', 'not', 'so', 'them', 'yes', 'you']
    assert "torchtext.vocab.Vocab" in str(type(vocab))
    assert vocab.itos == tokens_in_order
def test_convert_text_to_tokens():
    """convert_text_to_tokens maps text to vocab ids; OOV words become 0 unless found in the custom vocab."""
    train = ["Here are some strings",
             "Some not so interesting strings!",
             "Let's make ThEm MORe Intersting?",
             "Yes you can :)",
             "Coolio <::>"]
    embedding_name = "glove.6B.50d"
    vocab = func.build_vocab(train, embedding_name, save=False)
    custom_vocab = {
        "however" : 50,
        "going" : 51
    }
    sample_data = ["Let's make ThEm MORe Intersting?",
                   "Some not so interesting strings!",
                   "However, this one is going to have lots of <unk>s"]
    tokenized_data = [[17, 6, 18, 22, 19, 16, 10],
                      [3, 20, 21, 15, 4, 5],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 9, 0]]
    assert func.convert_text_to_tokens(sample_data, vocab, func.tokenize) == tokenized_data
    # With the custom vocab, 'however' and 'going' resolve to 50/51 instead of <unk>.
    tokenized_data = [[17, 6, 18, 22, 19, 16, 10],
                      [3, 20, 21, 15, 4, 5],
                      [50, 0, 0, 0, 0, 51, 0, 0, 0, 0, 8, 0, 9, 0]]
    assert func.convert_text_to_tokens(sample_data, vocab, func.tokenize, custom_vocab) == tokenized_data
def test_extract_queries_from_explanations():
    """Quoted spans inside an explanation should be extracted as queries (double, single, and doubled-single quotes)."""
    text = 'First type of "query"'
    assert ['query'] == func.extract_queries_from_explanations(text)
    text = 'Another "type" of "query"'
    assert ['type', 'query'] == func.extract_queries_from_explanations(text)
    text = 'Ideally all explanations will only use "double quote\'s", so we can avoid issues with "\'"'
    assert ['double quote\'s', '\''] == func.extract_queries_from_explanations(text)
    text = "An explanation can use 'single quotes'"
    assert ['single quotes'] == func.extract_queries_from_explanations(text)
    text = "However, there can be some problems with 'apostrophes like, 's'"
    assert ['apostrophes like, '] == func.extract_queries_from_explanations(text)
    text = "We can even handle ''double single quotes too''"
    assert ['double single quotes too'] == func.extract_queries_from_explanations(text)
    # NOTE(review): the assertions below are disabled, so this final `text`
    # assignment is currently unused — the mixed-quote and backtick cases
    # are not actually verified.
    text = "Though do \"not\" mix 'quotes'"
    # assert ['not'] == func.extract_queries_from_explanations(text)
    # text = "Finally we also handle `backticks as quotes`"
    # assert ["backticks as quotes"] == func.extract_queries_from_explanations(text)
    # text = "No quotes here though, so should be empty"
    # assert [] == func.extract_queries_from_explanations(text)
4c58561faba3770f46167adad05650809146160e | 2,305 | py | Python | tests/test_subqueues.py | hadren/mrq | 9851294b04ddb5071e9e366bc6b71b3e1d4a98e8 | [
"MIT"
] | null | null | null | tests/test_subqueues.py | hadren/mrq | 9851294b04ddb5071e9e366bc6b71b3e1d4a98e8 | [
"MIT"
] | null | null | null | tests/test_subqueues.py | hadren/mrq | 9851294b04ddb5071e9e366bc6b71b3e1d4a98e8 | [
"MIT"
] | null | null | null | import time
import pytest
from mrq.job import Job
from mrq.queue import Queue
@pytest.mark.parametrize(["queues", "enqueue_on"], [
[["main/", "second/"], ["main/", "main/sub", "main/sub/nested", "second/x"]],
[["prefix/main/"], ["prefix/main/", "prefix/main/sub", "prefix/main/sub/nested"]],
])
@pytest.mark.parametrize(["queue", "enqueue_on"], [
["main/", ["/main", "main_", "/", "main", "other"]],
["prefix/main/", ["prefix", "prefix/other", "prefix/main"]],
])
@pytest.mark.parametrize(["delimiter"], ["/", ".", "-"])
def test_refresh_interval(worker):
""" Tests that a refresh interval of 0 disables the subqueue detection """
worker.start(queues="test/", flags="--subqueues_refresh_interval=0")
time.sleep(2)
job_id1 = worker.send_task(
"tests.tasks.general.GetTime", {"a": 41},
queue="test/subqueue", block=False)
time.sleep(5)
job1 = Job(job_id1).fetch().data
assert job1["status"] == "queued"
worker.stop()
| 29.935065 | 109 | 0.657701 | import time
import pytest
from mrq.job import Job
from mrq.queue import Queue
@pytest.mark.parametrize(["queues", "enqueue_on"], [
    [["main/", "second/"], ["main/", "main/sub", "main/sub/nested", "second/x"]],
    [["prefix/main/"], ["prefix/main/", "prefix/main/sub", "prefix/main/sub/nested"]],
])
def test_matchable_subqueues(worker, queues, enqueue_on):
    """Tasks sent to subqueues matching a trailing-slash queue pattern should all be consumed."""
    worker.start(queues=" ".join(queues), flags="--subqueues_refresh_interval=0.1")
    job_ids = []
    for subqueue in enqueue_on:
        job_id = worker.send_task("tests.tasks.general.GetTime", {}, queue=subqueue, block=False)
        job_ids.append(job_id)
    # Every job should finish: the worker detects matching subqueues.
    assert all([Job(j).wait(poll_interval=0.01, timeout=3) for j in job_ids])
    worker.stop()
@pytest.mark.parametrize(["queue", "enqueue_on"], [
    ["main/", ["/main", "main_", "/", "main", "other"]],
    ["prefix/main/", ["prefix", "prefix/other", "prefix/main"]],
])
def test_unmatchable_subqueues(worker, queue, enqueue_on):
    """Tasks sent to queues that do NOT match the subscribed prefix must stay queued."""
    worker.start(queues=queue, flags="--subqueues_refresh_interval=0.1")
    job_ids = []
    for subqueue in enqueue_on:
        job_id = worker.send_task("tests.tasks.general.GetTime", {}, queue=subqueue, block=False)
        job_ids.append(job_id)
    time.sleep(2)
    results = [Job(j).fetch().data.get("status") for j in job_ids]
    # ensure tasks are not consumed by a worker
    assert results == ["queued"] * len(results)
    worker.stop()
# NOTE(review): the argvalues are bare strings; this works only because each
# is a length-1 sequence matching the single argname — tuples would be clearer.
@pytest.mark.parametrize(["delimiter"], ["/", ".", "-"])
def test_custom_delimiters(worker, delimiter):
    """Subqueue matching should honour a non-default delimiter passed via --subqueues_delimiter."""
    queue = "main" + delimiter
    subqueue = queue + "subqueue"
    worker.start(queues=queue, flags="--subqueues_refresh_interval=0.1 --subqueues_delimiter=%s" % delimiter)
    job_id = worker.send_task("tests.tasks.general.GetTime", {}, queue=subqueue, block=False)
    Job(job_id).wait(poll_interval=0.01)
    worker.stop()
def test_refresh_interval(worker):
    """ Tests that a refresh interval of 0 disables the subqueue detection """
    worker.start(queues="test/", flags="--subqueues_refresh_interval=0")
    time.sleep(2)
    job_id1 = worker.send_task(
        "tests.tasks.general.GetTime", {"a": 41},
        queue="test/subqueue", block=False)
    time.sleep(5)
    job1 = Job(job_id1).fetch().data
    # With detection disabled, the subqueue is never discovered and the job stays queued.
    assert job1["status"] == "queued"
    worker.stop()
| 1,244 | 0 | 66 |
04f4ab1b9e2bf0dbfeeb420de45c222c9e8ceeeb | 356 | py | Python | src/verify/word_count.py | privong/still-magic | 1d651840497d66d44ff43528f6e1f38e698ce168 | [
"CC-BY-4.0"
] | null | null | null | src/verify/word_count.py | privong/still-magic | 1d651840497d66d44ff43528f6e1f38e698ce168 | [
"CC-BY-4.0"
] | 1 | 2019-05-11T23:42:33.000Z | 2019-05-13T18:48:36.000Z | src/verify/word_count.py | privong/still-magic | 1d651840497d66d44ff43528f6e1f38e698ce168 | [
"CC-BY-4.0"
] | null | null | null | import sys
num_words = 0
count = {}
for word in sys.stdin:
num_words += 1
count[word] = count.get(word, 0) + 1
for word in count:
print('{} {}', word, count[word])
with open('logfile.csv', 'a') as logger:
logger.write('word_count.py,num_words,{}\n'.format(num_words))
logger.write('word_count.py,num_distinct,{}\n'.format(len(count)))
| 27.384615 | 70 | 0.648876 | import sys
num_words = 0
count = {}
for word in sys.stdin:
num_words += 1
count[word] = count.get(word, 0) + 1
for word in count:
print('{} {}', word, count[word])
with open('logfile.csv', 'a') as logger:
logger.write('word_count.py,num_words,{}\n'.format(num_words))
logger.write('word_count.py,num_distinct,{}\n'.format(len(count)))
| 0 | 0 | 0 |
ebeec0234d92bb2d9070e26f0c45f95a600d814b | 300 | py | Python | examples/inverted_pendulum/one_link_runs.py | ashander/opty | 85f8c5d75c924d393edcbc07324088c3266bca31 | [
"BSD-2-Clause-FreeBSD"
] | 63 | 2015-03-07T19:38:10.000Z | 2022-03-31T17:17:53.000Z | examples/inverted_pendulum/one_link_runs.py | ashander/opty | 85f8c5d75c924d393edcbc07324088c3266bca31 | [
"BSD-2-Clause-FreeBSD"
] | 52 | 2015-02-15T17:24:03.000Z | 2021-06-18T16:43:45.000Z | examples/inverted_pendulum/one_link_runs.py | ashander/opty | 85f8c5d75c924d393edcbc07324088c3266bca31 | [
"BSD-2-Clause-FreeBSD"
] | 22 | 2015-05-25T21:28:16.000Z | 2022-03-14T03:55:57.000Z | from pendulum import Identifier
num_links = 1
sample_rate = 100.0
init_type = 'random'
sensor_noise = True
duration = 10.0
for i in range(20):
identifier = Identifier(num_links, duration, sample_rate, init_type,
sensor_noise, False, False)
identifier.identify()
| 23.076923 | 72 | 0.683333 | from pendulum import Identifier
num_links = 1
sample_rate = 100.0
init_type = 'random'
sensor_noise = True
duration = 10.0
for i in range(20):
identifier = Identifier(num_links, duration, sample_rate, init_type,
sensor_noise, False, False)
identifier.identify()
| 0 | 0 | 0 |
d56e19f8410b988a477a969567156413be055ccd | 1,499 | py | Python | train_ResNet_cifar.py | kwonsungil/Faster-RCNN | 673879871a87f60d992eae24e0d8c6a6c0a22cec | [
"Apache-2.0"
] | null | null | null | train_ResNet_cifar.py | kwonsungil/Faster-RCNN | 673879871a87f60d992eae24e0d8c6a6c0a22cec | [
"Apache-2.0"
] | null | null | null | train_ResNet_cifar.py | kwonsungil/Faster-RCNN | 673879871a87f60d992eae24e0d8c6a6c0a22cec | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
import pickle
from models.ResNet_cifar import ResNet
from utils.load_cifar import load_train
if __name__ == '__main__':
nnum_classes = 10
batch_size = 128
epochs = 100
train_x, train_y = load_train(nnum_classes)
net = ResNet(True)
train_set_len = train_x.shape[0]
r_idx = np.arange(train_x.shape[0])
total_batch = int(train_set_len / batch_size)
for epoch in range(epochs):
# print(train_x[0])
r_idx = np.arange(train_x.shape[0])
np.random.shuffle(r_idx)
train_x = train_x[r_idx]
train_y = train_y[r_idx]
for i in range(total_batch + 1):
if ((i + 1) * batch_size) > train_set_len:
break
batch_x = train_x[i * batch_size: (i + 1) * batch_size]
batch_y = train_y[i * batch_size: (i + 1) * batch_size]
if i % 100 == 0:
global_step, train_loss, train_acc = net.train(batch_x, batch_y, True)
print('%d step\ttrain loss : %.3f\ttrain accuracy : %.3f' % (global_step, train_loss, train_acc))
# val_loss, val_acc = net.validate(test_x[:200], test_y[:200])
# print('%d step\ttrain loss : %.3f\ttrain accuracy : %.3f\tval loss : %.3f\tval accuracy : %.3f' % (
# global_step, train_loss, train_acc, val_loss, val_acc))
else:
_, loss, ac = net.train(batch_x, batch_y, False)
| 34.860465 | 118 | 0.575717 | import numpy as np
import os
import pickle
from models.ResNet_cifar import ResNet
from utils.load_cifar import load_train
if __name__ == '__main__':
nnum_classes = 10
batch_size = 128
epochs = 100
train_x, train_y = load_train(nnum_classes)
net = ResNet(True)
train_set_len = train_x.shape[0]
r_idx = np.arange(train_x.shape[0])
total_batch = int(train_set_len / batch_size)
for epoch in range(epochs):
# print(train_x[0])
r_idx = np.arange(train_x.shape[0])
np.random.shuffle(r_idx)
train_x = train_x[r_idx]
train_y = train_y[r_idx]
for i in range(total_batch + 1):
if ((i + 1) * batch_size) > train_set_len:
break
batch_x = train_x[i * batch_size: (i + 1) * batch_size]
batch_y = train_y[i * batch_size: (i + 1) * batch_size]
if i % 100 == 0:
global_step, train_loss, train_acc = net.train(batch_x, batch_y, True)
print('%d step\ttrain loss : %.3f\ttrain accuracy : %.3f' % (global_step, train_loss, train_acc))
# val_loss, val_acc = net.validate(test_x[:200], test_y[:200])
# print('%d step\ttrain loss : %.3f\ttrain accuracy : %.3f\tval loss : %.3f\tval accuracy : %.3f' % (
# global_step, train_loss, train_acc, val_loss, val_acc))
else:
_, loss, ac = net.train(batch_x, batch_y, False)
| 0 | 0 | 0 |
0dd45a4c1483a9359fd50b2f7912fc5c5c5c34da | 11,837 | py | Python | nvtabular/groupby.py | rjzamora/NVTabular | 068ce230b34b0eeae3fd15379291a0e79632b731 | [
"Apache-2.0"
] | null | null | null | nvtabular/groupby.py | rjzamora/NVTabular | 068ce230b34b0eeae3fd15379291a0e79632b731 | [
"Apache-2.0"
] | null | null | null | nvtabular/groupby.py | rjzamora/NVTabular | 068ce230b34b0eeae3fd15379291a0e79632b731 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import cupy as cp
import pandas as pd
import rmm
class GroupByMomentsCal(object):
"""
This is the class that GroupByMoments uses to
calculate the basic statistics of the data that
is grouped by a categorical feature.
Parameters
-----------
col : str
column name
col_count : str
column name to get group counts
cont_col : list of str
pre-calculated unique values.
stats : list of str or set of str, default ['count']
count of groups = ['count']
sum of cont_col = ['sum']
mean of cont_col = ['mean']
var of cont_col = ['var']
std of cont_col = ['std']
limit_frac : float, default 0.1
fraction of memory to use during unique id calculation.
gpu_mem_util_limit : float, default 0.8
GPU memory utilization limit during frequency based
calculation. If limit is exceeded, unique ids are moved
to host memory.
gpu_mem_trans_use : float, default 0.8
GPU memory utilization limit during transformation. How much
GPU memory will be used during transformation is calculated
using this parameter.
order_column_name : str, default "order-nvtabular"
a column name to be used to preserve the order of input data.
cudf's merge function doesn't preserve the order of the data
and this column name is used to create a column with integer
values in ascending order.
ddof : int, default "1"
Delta Degrees of Freedom. The divisor used in calculations is
N - ddof, where N represents the number of elements.
"""
def merge(self, gdf):
"""
Merges gdf with the calculated group stats.
Parameters
-----------
gdf : cudf DataFrame
Returns
-----------
stats_joined: cudf DataFrame
"""
order = cudf.Series(cp.arange(gdf.shape[0]))
gdf[self.order_column_name] = order
col_names = []
if self.cont_col is not None:
for i in range(len(self.cont_col)):
col_prefix = f"{self.col}_{self.cont_col[i]}_"
col_names.extend(col_prefix + stat for stat in self.stats_names if stat != "count")
if "count" in self.stats_names:
col_names.append(self.col + "_count")
avail_gpu_mem = rmm.get_info().free
sub_stats_size = int(avail_gpu_mem * self.gpu_mem_trans_use / (self.stats.shape[1] * 8))
if sub_stats_size == 0:
sub_stats_size = 1
stats_joined = None
i = 0
while i < self.stats.shape[0]:
sub_stats = cudf.from_pandas(self.stats.iloc[i : i + sub_stats_size])
joined = gdf[[self.col, self.order_column_name]].merge(
sub_stats, on=[self.col], how="left"
)
joined = joined.sort_values(self.order_column_name)
joined.reset_index(drop=True, inplace=True)
if stats_joined is None:
stats_joined = joined[col_names].copy()
else:
stats_joined = stats_joined.add(joined[col_names], fill_value=0)
i = i + sub_stats_size
joined = cudf.Series([])
gdf.drop(columns=[self.order_column_name], inplace=True)
return stats_joined[col_names]
def fit(self, gdf):
"""
Calculates the requested group stats of gdf and
stores results in the host memory.
Parameters
-----------
gdf : cudf DataFrame
"""
if self.cont_col is None:
groups = gdf[[self.col] + [self.col_count]].groupby([self.col])
else:
groups = gdf[[self.col] + self.cont_col + [self.col_count]].groupby([self.col])
if self.cont_col is not None:
if self._el_in_stats_names({"sum", "mean", "std", "var"}):
sums_part = groups.sum()
self.sums_host.append(sums_part.to_pandas())
if self._el_in_stats_names({"std", "var"}):
var_part = groups.std(ddof=self.ddof) ** 2
self.vars_host.append(var_part.to_pandas())
if self._el_in_stats_names({"count", "mean", "std", "var"}):
counts_part = groups.count()
self.counts_host.append(counts_part.to_pandas())
def fit_finalize(self):
"""
Finalizes the stats calculation.
"""
self.stats = pd.DataFrame()
if "count" in self.stats_names and not (self._el_in_stats_names({"mean", "std", "var"})):
counts_dev = cudf.DataFrame([])
for i in range(len(self.counts_host)):
counts_part = cudf.from_pandas(self.counts_host.pop())
if counts_dev.shape[0] == 0:
counts_dev = counts_part
else:
counts_dev = counts_dev.add(counts_part, fill_value=0)
self.counts = counts_dev.to_pandas()
new_col = self.col + "_count"
self.stats[new_col] = self.counts[self.col_count]
if self.cont_col is not None:
if "sum" in self.stats_names and not (self._el_in_stats_names({"mean", "std", "var"})):
sums_dev = cudf.DataFrame([])
for i in range(len(self.sums_host)):
sums_part = cudf.from_pandas(self.sums_host.pop())
if sums_dev.shape[0] == 0:
sums_dev = sums_part
else:
sums_dev = sums_dev.add(sums_part, fill_value=0)
self.sums = sums_dev.to_pandas()
for cont_name in self.cont_col:
new_col = self.col + "_" + cont_name + "_sum"
self.stats[new_col] = self.sums[cont_name]
if self._el_in_stats_names({"mean", "std", "var"}):
sums_dev = cudf.DataFrame([])
counts_dev = cudf.DataFrame([])
if self._el_in_stats_names({"std", "var"}):
var_dev = cudf.DataFrame([])
for i in range(len(self.sums_host)):
sums_part = cudf.from_pandas(self.sums_host.pop())
counts_part = cudf.from_pandas(self.counts_host.pop())
if self._el_in_stats_names({"std", "var"}):
var_part = cudf.from_pandas(self.vars_host.pop())
if i == 0:
counts_dev = counts_part
sums_dev = sums_part
if self._el_in_stats_names({"std", "var"}):
var_dev = var_part
else:
if self._el_in_stats_names({"std", "var"}):
# n1*v1
var_dev = var_dev.mul(counts_dev)
# n2*v2
var_dev = var_dev.add(var_part.mul(counts_part), fill_value=0)
# n1*(m1-m12)**2
m12_tmp = sums_dev.add(sums_part, fill_value=0)
m12_tmp = m12_tmp.mul(1 / (counts_dev.add(counts_part, fill_value=0)))
var_dev = var_dev.add(
counts_dev.mul(
((sums_dev.mul(1 / counts_dev)).add(-1 * m12_tmp, fill_value=0))
** 2
),
fill_value=0,
)
var_dev = var_dev.add(
counts_part.mul(
(sums_part.mul(1 / counts_part).add(-1 * m12_tmp, fill_value=0))
** 2
),
fill_value=0,
)
del m12_tmp
counts_dev = counts_dev.add(counts_part, fill_value=0)
sums_dev = sums_dev.add(sums_part, fill_value=0)
if self._el_in_stats_names({"std", "var"}):
var_dev = var_dev.mul(1 / counts_dev)
result_map = {}
if "count" in self.stats_names:
self.counts = counts_dev.to_pandas()
result_map["count"] = self.counts
if "sum" in self.stats_names:
self.sums = sums_dev.to_pandas()
result_map["sum"] = self.sums
if "mean" in self.stats_names:
mean_dev = sums_dev.mul(1 / counts_dev)
self.mean = mean_dev.to_pandas()
result_map["mean"] = self.mean
if "var" in self.stats_names:
self.var = var_dev.to_pandas()
result_map["var"] = self.var
if "std" in self.stats_names:
self.std = var_dev.sqrt().to_pandas()
result_map["std"] = self.std
for cont_name in self.cont_col:
for su_op in self.supported_ops:
if su_op in self.stats_names:
if su_op == "count":
new_col = self.col + "_count"
self.stats[new_col] = result_map[su_op][cont_name]
else:
new_col = self.col + "_" + cont_name + "_" + su_op
self.stats[new_col] = result_map[su_op][cont_name]
self.stats[self.col] = self.stats.index
self.stats.reset_index(drop=True, inplace=True)
return self.stats.shape[0]
| 39.195364 | 100 | 0.530709 | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import cupy as cp
import pandas as pd
import rmm
class GroupByMomentsCal(object):
"""
This is the class that GroupByMoments uses to
calculate the basic statistics of the data that
is grouped by a categorical feature.
Parameters
-----------
col : str
column name
col_count : str
column name to get group counts
cont_col : list of str
pre-calculated unique values.
stats : list of str or set of str, default ['count']
count of groups = ['count']
sum of cont_col = ['sum']
mean of cont_col = ['mean']
var of cont_col = ['var']
std of cont_col = ['std']
limit_frac : float, default 0.1
fraction of memory to use during unique id calculation.
gpu_mem_util_limit : float, default 0.8
GPU memory utilization limit during frequency based
calculation. If limit is exceeded, unique ids are moved
to host memory.
gpu_mem_trans_use : float, default 0.8
GPU memory utilization limit during transformation. How much
GPU memory will be used during transformation is calculated
using this parameter.
order_column_name : str, default "order-nvtabular"
a column name to be used to preserve the order of input data.
cudf's merge function doesn't preserve the order of the data
and this column name is used to create a column with integer
values in ascending order.
ddof : int, default "1"
Delta Degrees of Freedom. The divisor used in calculations is
N - ddof, where N represents the number of elements.
"""
def __init__(
self,
col,
col_count,
cont_col,
stats=["count"],
limit_frac=0.1,
gpu_mem_util_limit=0.8,
gpu_mem_trans_use=0.8,
order_column_name="order-nvtabular",
ddof=1,
):
if col is None:
raise ValueError("cat_names cannot be None for group by operations.")
if cont_col is None:
if "count" not in stats:
raise ValueError(
"count operations is only supported when there is no continuous columns."
)
self.supported_ops = ["count", "sum", "mean", "var", "std"]
for ops in stats:
if ops not in self.supported_ops:
raise ValueError(ops + " operation is not supported.")
self.col = col
if isinstance(cont_col, str):
cont_col = [cont_col]
self.col_count = col_count
self.cont_col = cont_col
if isinstance(stats, list):
stats = set(stats)
self.stats_names = stats
self.limit_frac = limit_frac
self.gpu_mem_util_limit = gpu_mem_util_limit
self.gpu_mem_trans_use = gpu_mem_trans_use
self.order_column_name = order_column_name
self.means_host = []
self.sums_host = []
self.vars_host = []
self.counts_host = []
self.ddof = ddof
def merge(self, gdf):
"""
Merges gdf with the calculated group stats.
Parameters
-----------
gdf : cudf DataFrame
Returns
-----------
stats_joined: cudf DataFrame
"""
order = cudf.Series(cp.arange(gdf.shape[0]))
gdf[self.order_column_name] = order
col_names = []
if self.cont_col is not None:
for i in range(len(self.cont_col)):
col_prefix = f"{self.col}_{self.cont_col[i]}_"
col_names.extend(col_prefix + stat for stat in self.stats_names if stat != "count")
if "count" in self.stats_names:
col_names.append(self.col + "_count")
avail_gpu_mem = rmm.get_info().free
sub_stats_size = int(avail_gpu_mem * self.gpu_mem_trans_use / (self.stats.shape[1] * 8))
if sub_stats_size == 0:
sub_stats_size = 1
stats_joined = None
i = 0
while i < self.stats.shape[0]:
sub_stats = cudf.from_pandas(self.stats.iloc[i : i + sub_stats_size])
joined = gdf[[self.col, self.order_column_name]].merge(
sub_stats, on=[self.col], how="left"
)
joined = joined.sort_values(self.order_column_name)
joined.reset_index(drop=True, inplace=True)
if stats_joined is None:
stats_joined = joined[col_names].copy()
else:
stats_joined = stats_joined.add(joined[col_names], fill_value=0)
i = i + sub_stats_size
joined = cudf.Series([])
gdf.drop(columns=[self.order_column_name], inplace=True)
return stats_joined[col_names]
def fit(self, gdf):
"""
Calculates the requested group stats of gdf and
stores results in the host memory.
Parameters
-----------
gdf : cudf DataFrame
"""
if self.cont_col is None:
groups = gdf[[self.col] + [self.col_count]].groupby([self.col])
else:
groups = gdf[[self.col] + self.cont_col + [self.col_count]].groupby([self.col])
if self.cont_col is not None:
if self._el_in_stats_names({"sum", "mean", "std", "var"}):
sums_part = groups.sum()
self.sums_host.append(sums_part.to_pandas())
if self._el_in_stats_names({"std", "var"}):
var_part = groups.std(ddof=self.ddof) ** 2
self.vars_host.append(var_part.to_pandas())
if self._el_in_stats_names({"count", "mean", "std", "var"}):
counts_part = groups.count()
self.counts_host.append(counts_part.to_pandas())
def _el_in_stats_names(self, elements):
return not self.stats_names.isdisjoint(elements)
def fit_finalize(self):
"""
Finalizes the stats calculation.
"""
self.stats = pd.DataFrame()
if "count" in self.stats_names and not (self._el_in_stats_names({"mean", "std", "var"})):
counts_dev = cudf.DataFrame([])
for i in range(len(self.counts_host)):
counts_part = cudf.from_pandas(self.counts_host.pop())
if counts_dev.shape[0] == 0:
counts_dev = counts_part
else:
counts_dev = counts_dev.add(counts_part, fill_value=0)
self.counts = counts_dev.to_pandas()
new_col = self.col + "_count"
self.stats[new_col] = self.counts[self.col_count]
if self.cont_col is not None:
if "sum" in self.stats_names and not (self._el_in_stats_names({"mean", "std", "var"})):
sums_dev = cudf.DataFrame([])
for i in range(len(self.sums_host)):
sums_part = cudf.from_pandas(self.sums_host.pop())
if sums_dev.shape[0] == 0:
sums_dev = sums_part
else:
sums_dev = sums_dev.add(sums_part, fill_value=0)
self.sums = sums_dev.to_pandas()
for cont_name in self.cont_col:
new_col = self.col + "_" + cont_name + "_sum"
self.stats[new_col] = self.sums[cont_name]
if self._el_in_stats_names({"mean", "std", "var"}):
sums_dev = cudf.DataFrame([])
counts_dev = cudf.DataFrame([])
if self._el_in_stats_names({"std", "var"}):
var_dev = cudf.DataFrame([])
for i in range(len(self.sums_host)):
sums_part = cudf.from_pandas(self.sums_host.pop())
counts_part = cudf.from_pandas(self.counts_host.pop())
if self._el_in_stats_names({"std", "var"}):
var_part = cudf.from_pandas(self.vars_host.pop())
if i == 0:
counts_dev = counts_part
sums_dev = sums_part
if self._el_in_stats_names({"std", "var"}):
var_dev = var_part
else:
if self._el_in_stats_names({"std", "var"}):
# n1*v1
var_dev = var_dev.mul(counts_dev)
# n2*v2
var_dev = var_dev.add(var_part.mul(counts_part), fill_value=0)
# n1*(m1-m12)**2
m12_tmp = sums_dev.add(sums_part, fill_value=0)
m12_tmp = m12_tmp.mul(1 / (counts_dev.add(counts_part, fill_value=0)))
var_dev = var_dev.add(
counts_dev.mul(
((sums_dev.mul(1 / counts_dev)).add(-1 * m12_tmp, fill_value=0))
** 2
),
fill_value=0,
)
var_dev = var_dev.add(
counts_part.mul(
(sums_part.mul(1 / counts_part).add(-1 * m12_tmp, fill_value=0))
** 2
),
fill_value=0,
)
del m12_tmp
counts_dev = counts_dev.add(counts_part, fill_value=0)
sums_dev = sums_dev.add(sums_part, fill_value=0)
if self._el_in_stats_names({"std", "var"}):
var_dev = var_dev.mul(1 / counts_dev)
result_map = {}
if "count" in self.stats_names:
self.counts = counts_dev.to_pandas()
result_map["count"] = self.counts
if "sum" in self.stats_names:
self.sums = sums_dev.to_pandas()
result_map["sum"] = self.sums
if "mean" in self.stats_names:
mean_dev = sums_dev.mul(1 / counts_dev)
self.mean = mean_dev.to_pandas()
result_map["mean"] = self.mean
if "var" in self.stats_names:
self.var = var_dev.to_pandas()
result_map["var"] = self.var
if "std" in self.stats_names:
self.std = var_dev.sqrt().to_pandas()
result_map["std"] = self.std
for cont_name in self.cont_col:
for su_op in self.supported_ops:
if su_op in self.stats_names:
if su_op == "count":
new_col = self.col + "_count"
self.stats[new_col] = result_map[su_op][cont_name]
else:
new_col = self.col + "_" + cont_name + "_" + su_op
self.stats[new_col] = result_map[su_op][cont_name]
self.stats[self.col] = self.stats.index
self.stats.reset_index(drop=True, inplace=True)
return self.stats.shape[0]
| 1,438 | 0 | 54 |
1c6767b3796c48b41209477099da1adff5686cd7 | 662 | py | Python | migrations/versions/825d555af49_bill_versions_enacted.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 2 | 2019-06-11T20:46:43.000Z | 2020-08-27T22:50:32.000Z | migrations/versions/825d555af49_bill_versions_enacted.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 70 | 2017-05-26T14:04:06.000Z | 2021-06-30T10:21:58.000Z | migrations/versions/825d555af49_bill_versions_enacted.py | havanhuy1997/pmg-cms-2 | 21571235cf3d9552013bca29ab9af288b08e00d6 | [
"Apache-2.0"
] | 4 | 2017-08-29T10:09:30.000Z | 2021-05-25T11:29:03.000Z | """bill-versions-enacted
Revision ID: 825d555af49
Revises: 3101ec185bdf
Create Date: 2015-12-08 08:35:52.703472
"""
# revision identifiers, used by Alembic.
revision = '825d555af49'
down_revision = '3101ec185bdf'
from alembic import op
import sqlalchemy as sa
| 24.518519 | 120 | 0.705438 | """bill-versions-enacted
Revision ID: 825d555af49
Revises: 3101ec185bdf
Create Date: 2015-12-08 08:35:52.703472
"""
# revision identifiers, used by Alembic.
revision = '825d555af49'
down_revision = '3101ec185bdf'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('bill_versions', sa.Column('enacted', sa.Boolean(), server_default=sa.text(u'false'), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('bill_versions', 'enacted')
### end Alembic commands ###
| 350 | 0 | 46 |
15bb3f8c0b20ec4171272b2b9585e828d86f8694 | 1,165 | py | Python | core/mail.py | Shaffron/qtpie | d41e719477558584aea14fc8724d0faf01dab2bd | [
"MIT"
] | null | null | null | core/mail.py | Shaffron/qtpie | d41e719477558584aea14fc8724d0faf01dab2bd | [
"MIT"
] | 10 | 2020-03-31T09:54:38.000Z | 2021-08-23T20:28:52.000Z | core/mail.py | Shaffron/qtpie | d41e719477558584aea14fc8724d0faf01dab2bd | [
"MIT"
] | null | null | null | import smtplib
from email.mime.text import MIMEText
'''
이거 버리고 django.core.mail 사용하기
from django.core.mail import send_mail
send_mail(
'Subject here',
'Here is the message.',
'from@example.com',
['to@example.com'],
fail_silently=False,
)
[ settings.py 설정값들 ]
EMAIL_BACKEND
EMAIL_FILE_PATH
EMAIL_HOST
EMAIL_HOST_PASSWORD
EMAIL_HOST_USER
EMAIL_PORT
EMAIL_SSL_CERTFILE
EMAIL_SSL_KEYFILE
EMAIL_SUBJECT_PREFIX
EMAIL_TIMEOUT
EMAIL_USE_LOCALTIME
''' | 19.416667 | 56 | 0.656652 | import smtplib
from email.mime.text import MIMEText
'''
이거 버리고 django.core.mail 사용하기
from django.core.mail import send_mail
send_mail(
'Subject here',
'Here is the message.',
'from@example.com',
['to@example.com'],
fail_silently=False,
)
[ settings.py 설정값들 ]
EMAIL_BACKEND
EMAIL_FILE_PATH
EMAIL_HOST
EMAIL_HOST_PASSWORD
EMAIL_HOST_USER
EMAIL_PORT
EMAIL_SSL_CERTFILE
EMAIL_SSL_KEYFILE
EMAIL_SUBJECT_PREFIX
EMAIL_TIMEOUT
EMAIL_USE_LOCALTIME
'''
class Mail:
HOST = 'smtp.gmail.com'
USER = 'with.alpha.and.omega'
PASSWORD = 'llblysryeqhiunsm'
FROM = 'Daily QT Bot'
TO = '18vs1004@naver.com'
@classmethod
def get_smtp_server(self):
smtp = smtplib.SMTP(self.HOST, 587)
smtp.ehlo()
smtp.starttls()
smtp.login(self.USER, self.PASSWORD)
return smtp
@classmethod
def send_mail(cls, subject, message, to=None):
smtp = cls.get_smtp_server()
if not to:
to = cls.TO
payload = MIMEText(message)
payload['Subject'] = subject
payload['To'] = to
smtp.sendmail(cls.FROM, to, payload.as_string())
smtp.close() | 445 | 231 | 22 |
4c23276f93dc613fd18302346d3e93c749f7dadc | 3,746 | py | Python | core/settings.py | kalaLokia/fbr_live_production | e793a00356adbc1823b896ce7a3c0a58001eb011 | [
"MIT"
] | null | null | null | core/settings.py | kalaLokia/fbr_live_production | e793a00356adbc1823b896ce7a3c0a58001eb011 | [
"MIT"
] | null | null | null | core/settings.py | kalaLokia/fbr_live_production | e793a00356adbc1823b896ce7a3c0a58001eb011 | [
"MIT"
] | null | null | null | """
Main configurations of the application, this file needed to be loaded initially.
"""
import configparser
import os
from . import ROOT, CONNECTION_STRING, LOG_SUNDAY, MIN_PRODUCTION, PRODUCTION_START_HOUR
from .log_me import logMessage
if not os.path.exists(ROOT):
os.makedirs(ROOT)
SLACK_WH = None
DISCORD_WH = None
GOOGLE_WH = None
SLACK_APP_TOKEN = None
SLACK_CHANNEL_ID = None
DISPLAY_HOUR_COUNT = 0
DATABASE_NAME = "barcode" # default
is_api_available = False
config = configparser.ConfigParser(interpolation=None)
exists = config.read(ROOT + "config.ini")
if exists:
if config.has_section("SQL SERVER"):
try:
DATABASE_NAME = config["SQL SERVER"]["DATABASE"]
CONNECTION_STRING = (
r"Driver={ODBC Driver 17 for SQL Server};"
rf'Server={config["SQL SERVER"]["SERVER"]};'
rf"Database={DATABASE_NAME};"
rf'uid={config["SQL SERVER"]["UID"]};'
rf'pwd={config["SQL SERVER"]["PWD"]};'
r"Integrated Security=false;"
)
except KeyError as e:
CONNECTION_STRING = None
logMessage(f'Required key "{e.args[0]}" not found in configurations.')
if config.has_section("SLACK APP"):
try:
SLACK_APP_TOKEN = config["SLACK APP"]["BOT_TOKEN"]
SLACK_CHANNEL_ID = config["SLACK APP"]["CHANNEL_ID"]
if not SLACK_APP_TOKEN.startswith("xoxb"):
SLACK_APP_TOKEN = None
else:
is_api_available = True
except KeyError as e:
SLACK_APP_TOKEN = None
logMessage(f'Required key "{e.args[0]}" not found in configurations.')
if config.has_option("WEBHOOK", "SLACK"):
value = config.get("WEBHOOK", "SLACK")
if value.startswith("https://hooks.slack.com/services/"):
SLACK_WH = value
is_api_available = True
if config.has_option("WEBHOOK", "DISCORD"):
value = config.get("WEBHOOK", "DISCORD")
if value.startswith("https://discord"):
DISCORD_WH = value
is_api_available = True
if config.has_option("WEBHOOK", "GOOGLE"):
value = config.get("WEBHOOK", "GOOGLE")
if value.startswith("https://chat.googleapis.com"):
GOOGLE_WH = value
is_api_available = True
if config.has_option("GENERAL", "SUNDAY_ENABLE"):
value = config.get("GENERAL", "SUNDAY_ENABLE")
try:
if int(value) != 0:
LOG_SUNDAY = True
except:
pass # Default value will consider
if config.has_option("GENERAL", "MIN_PRODUCTION_LOGGING"):
value = config.get("GENERAL", "MIN_PRODUCTION_LOGGING")
try:
if int(value) > 0:
MIN_PRODUCTION = int(value)
except:
pass # Default value will consider
if config.has_option("GENERAL", "PRODUCTION_START_HOUR"):
value = config.get("GENERAL", "PRODUCTION_START_HOUR")
try:
value = int(value)
if value >= 0 and value < 24:
PRODUCTION_START_HOUR = value
except:
pass # Default value will consider
if config.has_option("GENERAL", "DISPLAY_HOUR_COUNT"):
value = config.get("GENERAL", "DISPLAY_HOUR_COUNT")
try:
value = int(value)
if value == 1:
DISPLAY_HOUR_COUNT = value
except:
pass # Default value will consider
if not is_api_available:
logMessage("No valid webhook configurations found. Failed to sent report.")
else:
CONNECTION_STRING = None
logMessage("Configuration file missing, Exiting..!") # Then do not run
| 32.573913 | 88 | 0.599306 | """
Main configurations of the application, this file needed to be loaded initially.
"""
import configparser
import os
from . import ROOT, CONNECTION_STRING, LOG_SUNDAY, MIN_PRODUCTION, PRODUCTION_START_HOUR
from .log_me import logMessage
if not os.path.exists(ROOT):
os.makedirs(ROOT)
SLACK_WH = None
DISCORD_WH = None
GOOGLE_WH = None
SLACK_APP_TOKEN = None
SLACK_CHANNEL_ID = None
DISPLAY_HOUR_COUNT = 0
DATABASE_NAME = "barcode" # default
is_api_available = False
config = configparser.ConfigParser(interpolation=None)
exists = config.read(ROOT + "config.ini")
if exists:
if config.has_section("SQL SERVER"):
try:
DATABASE_NAME = config["SQL SERVER"]["DATABASE"]
CONNECTION_STRING = (
r"Driver={ODBC Driver 17 for SQL Server};"
rf'Server={config["SQL SERVER"]["SERVER"]};'
rf"Database={DATABASE_NAME};"
rf'uid={config["SQL SERVER"]["UID"]};'
rf'pwd={config["SQL SERVER"]["PWD"]};'
r"Integrated Security=false;"
)
except KeyError as e:
CONNECTION_STRING = None
logMessage(f'Required key "{e.args[0]}" not found in configurations.')
if config.has_section("SLACK APP"):
try:
SLACK_APP_TOKEN = config["SLACK APP"]["BOT_TOKEN"]
SLACK_CHANNEL_ID = config["SLACK APP"]["CHANNEL_ID"]
if not SLACK_APP_TOKEN.startswith("xoxb"):
SLACK_APP_TOKEN = None
else:
is_api_available = True
except KeyError as e:
SLACK_APP_TOKEN = None
logMessage(f'Required key "{e.args[0]}" not found in configurations.')
if config.has_option("WEBHOOK", "SLACK"):
value = config.get("WEBHOOK", "SLACK")
if value.startswith("https://hooks.slack.com/services/"):
SLACK_WH = value
is_api_available = True
if config.has_option("WEBHOOK", "DISCORD"):
value = config.get("WEBHOOK", "DISCORD")
if value.startswith("https://discord"):
DISCORD_WH = value
is_api_available = True
if config.has_option("WEBHOOK", "GOOGLE"):
value = config.get("WEBHOOK", "GOOGLE")
if value.startswith("https://chat.googleapis.com"):
GOOGLE_WH = value
is_api_available = True
if config.has_option("GENERAL", "SUNDAY_ENABLE"):
value = config.get("GENERAL", "SUNDAY_ENABLE")
try:
if int(value) != 0:
LOG_SUNDAY = True
except:
pass # Default value will consider
if config.has_option("GENERAL", "MIN_PRODUCTION_LOGGING"):
value = config.get("GENERAL", "MIN_PRODUCTION_LOGGING")
try:
if int(value) > 0:
MIN_PRODUCTION = int(value)
except:
pass # Default value will consider
if config.has_option("GENERAL", "PRODUCTION_START_HOUR"):
value = config.get("GENERAL", "PRODUCTION_START_HOUR")
try:
value = int(value)
if value >= 0 and value < 24:
PRODUCTION_START_HOUR = value
except:
pass # Default value will consider
if config.has_option("GENERAL", "DISPLAY_HOUR_COUNT"):
value = config.get("GENERAL", "DISPLAY_HOUR_COUNT")
try:
value = int(value)
if value == 1:
DISPLAY_HOUR_COUNT = value
except:
pass # Default value will consider
if not is_api_available:
logMessage("No valid webhook configurations found. Failed to sent report.")
else:
CONNECTION_STRING = None
logMessage("Configuration file missing, Exiting..!") # Then do not run
| 0 | 0 | 0 |
abe28dbb7a9606c85b5e2ef9fa54be539aebaec3 | 544 | py | Python | apps/comics/migrations/0041_auto_20200621_1553.py | pennomi/comics | 1ec4a8a28a7ab8fe6590b9c95ca240e83b1f4186 | [
"MIT"
] | 50 | 2018-09-14T20:43:07.000Z | 2022-02-02T03:16:12.000Z | apps/comics/migrations/0041_auto_20200621_1553.py | pennomi/comics | 1ec4a8a28a7ab8fe6590b9c95ca240e83b1f4186 | [
"MIT"
] | null | null | null | apps/comics/migrations/0041_auto_20200621_1553.py | pennomi/comics | 1ec4a8a28a7ab8fe6590b9c95ca240e83b1f4186 | [
"MIT"
] | 7 | 2018-10-16T19:22:55.000Z | 2022-01-05T02:01:44.000Z | # Generated by Django 3.0.7 on 2020-06-21 15:53
from django.db import migrations, models
| 23.652174 | 63 | 0.612132 | # Generated by Django 3.0.7 on 2020-06-21 15:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comics', '0040_linkedsocialplatform_requires_money'),
]
operations = [
migrations.RemoveField(
model_name='linkedsocialplatform',
name='requires_money',
),
migrations.AddField(
model_name='socialplatform',
name='requires_money',
field=models.BooleanField(default=False),
),
]
| 0 | 430 | 23 |
d9d4ad5b8efeb904f43847d591df9604686969cc | 1,626 | py | Python | tests/snc/agents/hedgehog/asymptotic_covariance/test_computer_asymptotic_cov_bernoulli_service_interface.py | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
] | 5 | 2021-03-24T16:23:10.000Z | 2021-11-17T12:44:51.000Z | tests/snc/agents/hedgehog/asymptotic_covariance/test_computer_asymptotic_cov_bernoulli_service_interface.py | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
] | 3 | 2021-03-26T01:16:08.000Z | 2021-05-08T22:06:47.000Z | tests/snc/agents/hedgehog/asymptotic_covariance/test_computer_asymptotic_cov_bernoulli_service_interface.py | dmcnamee/snc | c2da8c1e9ecdc42c59b9de73224b3d50ee1c9786 | [
"Apache-2.0"
] | 2 | 2021-03-24T17:20:06.000Z | 2021-04-19T09:01:12.000Z | import numpy as np
import pytest
from snc.agents.hedgehog.asymptotic_workload_cov.\
compute_asymptotic_cov_bernoulli_service_and_arrivals \
import ComputeAsymptoticCovBernoulliServiceAndArrivals
from snc.agents.hedgehog.asymptotic_workload_cov.\
compute_asymptotic_cov_bernoulli_service_poisson_arrivals \
import ComputeAsymptoticCovBernoulliServicePoissonArrivals
import snc.agents.hedgehog.workload.workload as workload
from snc.environments import examples
@pytest.fixture(params=[ComputeAsymptoticCovBernoulliServicePoissonArrivals,
ComputeAsymptoticCovBernoulliServiceAndArrivals])
| 39.658537 | 100 | 0.830873 | import numpy as np
import pytest
from snc.agents.hedgehog.asymptotic_workload_cov.\
compute_asymptotic_cov_bernoulli_service_and_arrivals \
import ComputeAsymptoticCovBernoulliServiceAndArrivals
from snc.agents.hedgehog.asymptotic_workload_cov.\
compute_asymptotic_cov_bernoulli_service_poisson_arrivals \
import ComputeAsymptoticCovBernoulliServicePoissonArrivals
import snc.agents.hedgehog.workload.workload as workload
from snc.environments import examples
@pytest.fixture(params=[ComputeAsymptoticCovBernoulliServicePoissonArrivals,
ComputeAsymptoticCovBernoulliServiceAndArrivals])
def class_fixture(request):
return request.param
def test_compute_variance_single_entry_service_process_one(class_fixture):
p = 1
assert class_fixture.compute_variance_single_entry_service_process(p) == 0
def test_compute_variance_single_entry_service_process_zero(class_fixture):
p = 0
assert class_fixture.compute_variance_single_entry_service_process(p) == 0
def test_compute_variance_single_entry_service_process(class_fixture):
    """Bernoulli variance p*(1-p) reaches its maximum 0.25 at p == 0.5."""
    p = 0.5
    assert class_fixture.compute_variance_single_entry_service_process(p) == 0.25
def test_compute_asymptotic_cov_service_process_non_orthogonal_constituency_matrix(class_fixture):
    """Constructor must reject a constituency matrix whose rows are not orthogonal."""
    env = examples.double_reentrant_line_only_shared_resources_model(initial_state=np.zeros((4, 1)))
    workload_tuple = workload.compute_load_workload_matrix(env)
    # Make two resources share an activity, breaking row orthogonality.
    env.constituency_matrix[0, 1] = 1
    with pytest.raises(AssertionError):
        _ = class_fixture(env.job_generator, env.constituency_matrix, workload_tuple.workload_mat)
| 878 | 0 | 114 |
53db234b40ed5f4ee788777a29aedb4f2a88eef7 | 1,421 | py | Python | 2021/scratchpad/day_24_wolfram1.py | apple-phi/Advent-of-Code | 7227b5692eb456f23afd1a9f93a44c48436cb1ad | [
"Unlicense"
] | null | null | null | 2021/scratchpad/day_24_wolfram1.py | apple-phi/Advent-of-Code | 7227b5692eb456f23afd1a9f93a44c48436cb1ad | [
"Unlicense"
] | null | null | null | 2021/scratchpad/day_24_wolfram1.py | apple-phi/Advent-of-Code | 7227b5692eb456f23afd1a9f93a44c48436cb1ad | [
"Unlicense"
] | null | null | null | from typing import *
with open("i") as f:
data = f.read().splitlines()
print(simplify_data(data)["z"])
| 29.604167 | 81 | 0.564391 | from typing import *
class CustomDict(dict):
    """Dict that treats unknown keys as integer literals.

    ALU operands are either register names (present in the mapping) or
    literal numbers written as strings; a lookup miss is interpreted as
    the latter and converted with ``int``.
    """

    def __getitem__(self, key):
        try:
            return super().__getitem__(key)
        except KeyError:
            return int(key)
def parse_instr(instr: str, *params: str, var_map: dict, input_iter: Iterator):
    """Symbolically execute one ALU instruction, rewriting ``var_map`` in place.

    ``inp`` stores the next symbolic input name in the destination register.
    Every other opcode replaces the destination register's expression with a
    Wolfram-language expression built from both operands (literal operands
    are resolved by ``var_map``'s lookup behaviour).

    Raises NotImplementedError for unknown opcodes.
    """
    if instr == "inp":
        var_map[params[0]] = next(input_iter)
        return
    # Wolfram-language templates, one per supported binary opcode.
    templates = {
        "add": "({0} + {1})",
        "mul": "({0} * {1})",
        "div": "(IntegerPart[{0} / {1}])",
        "mod": "(Mod[{0}, {1}])",
        "eql": "(Piecewise[{{{{1, {0} == {1}}}}}])",
    }
    if instr not in templates:
        raise NotImplementedError(f"Cannot parse instruction `{instr}`")
    var_map[params[0]] = templates[instr].format(
        var_map[params[0]], var_map[params[1]]
    )
with open("i") as f:
data = f.read().splitlines()
def simplify_data(data: List[str]):
    """Symbolically run the whole program, returning the final register map.

    Registers start at the literal "0"; input instructions consume the
    symbolic names a..n in order.  The assertion checks that exactly 14
    ``inp`` instructions were present.
    """
    registers = CustomDict({"w": "0", "x": "0", "y": "0", "z": "0"})
    input_symbols = iter("abcdefghijklmn")
    for statement in data:
        print(statement)
        parse_instr(*statement.split(), var_map=registers, input_iter=input_symbols)
    # Every symbolic input name must have been consumed by an `inp`.
    assert not tuple(input_symbols)
    return registers
print(simplify_data(data)["z"])
| 1,210 | 2 | 95 |
f8b368344adb9fdf26b8972dbfb79fe854662c67 | 125 | py | Python | home/urls.py | mdpe-ir/mdCms | 69aea3687a2b9d7846b196c00a0cd3866c54fd4c | [
"BSD-3-Clause-Attribution"
] | 1 | 2021-03-09T19:03:35.000Z | 2021-03-09T19:03:35.000Z | home/urls.py | mdpe-ir/mdCms | 69aea3687a2b9d7846b196c00a0cd3866c54fd4c | [
"BSD-3-Clause-Attribution"
] | null | null | null | home/urls.py | mdpe-ir/mdCms | 69aea3687a2b9d7846b196c00a0cd3866c54fd4c | [
"BSD-3-Clause-Attribution"
] | 1 | 2021-11-13T06:04:33.000Z | 2021-11-13T06:04:33.000Z | from django.urls import path
from .views import *
app_name = 'home'
urlpatterns = [
path('<slug>', index, name='home'),
]
| 17.857143 | 36 | 0.664 | from django.urls import path
from .views import *
app_name = 'home'
urlpatterns = [
path('<slug>', index, name='home'),
]
| 0 | 0 | 0 |
9458a24a25f2d4c036eb618cfbe0ca2e7bcbc16f | 141 | py | Python | fusesoc/capi2/__init__.py | idex-biometrics/fusesoc | 58bbb864723112e9bfd7e02a17749800225815e9 | [
"BSD-2-Clause"
] | 829 | 2015-03-10T12:28:42.000Z | 2022-03-28T02:44:12.000Z | fusesoc/capi2/__init__.py | idex-biometrics/fusesoc | 58bbb864723112e9bfd7e02a17749800225815e9 | [
"BSD-2-Clause"
] | 460 | 2015-01-26T18:03:19.000Z | 2022-03-30T08:30:41.000Z | fusesoc/capi2/__init__.py | idex-biometrics/fusesoc | 58bbb864723112e9bfd7e02a17749800225815e9 | [
"BSD-2-Clause"
] | 177 | 2015-02-02T13:58:12.000Z | 2022-03-30T20:56:21.000Z | # Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
| 35.25 | 67 | 0.794326 | # Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
| 0 | 0 | 0 |
638e099a8fe7bfbdb8ba7e6197e7eb8943cc0296 | 819 | py | Python | examples/async_experiment_3.py | sandutsar/pyinstrument | d3c45164a385021f366c1081baec18a1a226a573 | [
"BSD-3-Clause"
] | 3,768 | 2015-01-04T07:44:17.000Z | 2022-03-30T16:18:32.000Z | examples/async_experiment_3.py | sandutsar/pyinstrument | d3c45164a385021f366c1081baec18a1a226a573 | [
"BSD-3-Clause"
] | 136 | 2015-01-21T22:07:46.000Z | 2022-03-24T20:27:14.000Z | examples/async_experiment_3.py | sandutsar/pyinstrument | d3c45164a385021f366c1081baec18a1a226a573 | [
"BSD-3-Clause"
] | 177 | 2015-01-15T19:07:19.000Z | 2022-03-25T15:00:32.000Z | import asyncio
import time
import trio
import pyinstrument
trio.run(task)
| 18.2 | 63 | 0.628816 | import asyncio
import time
import trio
import pyinstrument
def do_nothing():
    # Intentional no-op: gives busy_wait a cheap call target that appears in profiles.
    pass
def busy_wait(duration):
    """Spin the CPU for roughly `duration` seconds by polling the wall clock."""
    deadline = time.time() + duration
    while time.time() < deadline:
        do_nothing()
async def say(what, when, profile=False):
    """Busy-work, sleep `when` seconds, busy-work again, then print `what`.

    When `profile` is true the whole task is wrapped in a pyinstrument
    Profiler and the profile is printed on completion.
    """
    profiler = None
    if profile:
        profiler = pyinstrument.Profiler()
        profiler.start()
    busy_wait(0.1)
    started = time.time()
    await trio.sleep(when)
    print(f"slept for {time.time() - started:.3f} seconds")
    busy_wait(0.1)
    print(what)
    if profiler is not None:
        profiler.stop()
        profiler.print(show_all=True)
async def task():
    """Run three profiled `say` tasks concurrently in one trio nursery."""
    greetings = (
        ("first hello", 2),
        ("second hello", 1),
        ("third hello", 3),
    )
    async with trio.open_nursery() as nursery:
        for message, delay in greetings:
            nursery.start_soon(say, message, delay, True)
trio.run(task)
| 645 | 0 | 92 |
160e8c205570d971714c7b71e9230402530f0d1c | 1,467 | py | Python | cibuildwheel/util.py | GjjvdBurg/cibuildwheel | 5f416b194a488b79aaab7a696e23ab6958f99932 | [
"BSD-2-Clause"
] | 1 | 2021-07-07T14:30:22.000Z | 2021-07-07T14:30:22.000Z | cibuildwheel/util.py | josh146/cibuildwheel | 38b3b0a3dad69d4c3989fe5a524020166390c293 | [
"BSD-2-Clause"
] | null | null | null | cibuildwheel/util.py | josh146/cibuildwheel | 38b3b0a3dad69d4c3989fe5a524020166390c293 | [
"BSD-2-Clause"
] | 2 | 2019-07-16T02:06:43.000Z | 2019-11-17T02:30:54.000Z | from fnmatch import fnmatch
import warnings
def prepare_command(command, project):
'''
Preprocesses a command by expanding variables like {project}.
For example, used in the test_command option, to specify the path to the
tests directory.
'''
return command.format(python='python', pip='pip', project=project)
# Taken from https://stackoverflow.com/a/107717
| 27.679245 | 111 | 0.657805 | from fnmatch import fnmatch
import warnings
def prepare_command(command, project):
    """Expand placeholder variables such as {project} in a command string.

    Used e.g. for the test_command option, where {project} points at the
    checkout containing the tests; {python} and {pip} expand to the bare
    interpreter/pip names.
    """
    substitutions = {'python': 'python', 'pip': 'pip', 'project': project}
    return command.format(**substitutions)
def get_build_verbosity_extra_flags(level):
    """Translate a signed verbosity level into pip-style flags.

    Positive levels map to ['-v'*level], negative to ['-q'*|level|],
    zero to no flags at all.
    """
    if level == 0:
        return []
    letter = 'v' if level > 0 else 'q'
    return ['-' + abs(level) * letter]
class BuildSelector(object):
    """Decides whether a build identifier should be built.

    A build id is selected when it matches at least one whitespace-separated
    fnmatch pattern in `build_config` and none in `skip_config`.
    """

    def __init__(self, build_config, skip_config):
        self.build_patterns = build_config.split()
        self.skip_patterns = skip_config.split()

    def __call__(self, build_id):
        selected = any(fnmatch(build_id, p) for p in self.build_patterns)
        skipped = any(fnmatch(build_id, p) for p in self.skip_patterns)
        return selected and not skipped

    def __repr__(self):
        return 'BuildSelector({!r} - {!r})'.format(
            ' '.join(self.build_patterns), ' '.join(self.skip_patterns))
# Taken from https://stackoverflow.com/a/107717
class Unbuffered(object):
    """File-like proxy that flushes the wrapped stream after every write.

    All attributes other than write/writelines are delegated untouched to
    the underlying stream.
    """

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        target = self.stream
        target.write(data)
        target.flush()

    def writelines(self, datas):
        target = self.stream
        target.writelines(datas)
        target.flush()

    def __getattr__(self, attr):
        # Delegate everything else (close, fileno, encoding, ...).
        return getattr(self.stream, attr)
| 812 | 11 | 255 |
dffefedf4722660bfa1a035fbf6b5f10872b1e3e | 9,328 | py | Python | models.py | JeffreyTsang/Brickbreaker | 37f0d143e9f937027fc281aef1511d0e9c804b8b | [
"MIT"
] | null | null | null | models.py | JeffreyTsang/Brickbreaker | 37f0d143e9f937027fc281aef1511d0e9c804b8b | [
"MIT"
] | null | null | null | models.py | JeffreyTsang/Brickbreaker | 37f0d143e9f937027fc281aef1511d0e9c804b8b | [
"MIT"
] | null | null | null | # models.py
# Michael Huang (mh999), Jeffrey Tsang (jet253)
# December 3rd, 2016
"""Models module for Breakout
This module contains the model classes for the Breakout game. That is anything that you
interact with on the screen is model: the paddle, the ball, and any of the bricks.
Technically, just because something is a model does not mean there has to be a special
class for it. Unless you need something special, both paddle and individual bricks could
just be instances of GRectangle. However, we do need something special: collision
detection. That is why we have custom classes.
You are free to add new models to this module. You may wish to do this when you add
new features to your game. If you are unsure about whether to make a new class or
not, please ask on Piazza."""
import random # To randomly generate the ball velocity
from constants import *
from game2d import *
# PRIMARY RULE: Models are not allowed to access anything except the module constants.py.
# If you need extra information from Play, then it should be a parameter in your method,
# and Play should pass it as a argument when it calls the method.
class Paddle(GRectangle):
"""An instance is the game paddle.
This class contains a method to detect collision with the ball, as well as move it
left and right. You may wish to add more features to this class.
The attributes of this class are those inherited from GRectangle.
LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
"""
# GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)
# INITIALIZER TO CREATE A NEW PADDLE
def __init__(self, x, bottom, width, height, color):
"""Creates a paddle of Parent class Grectangle
Initializer: Creates a GRectangle object as the paddle. Paddle is a
subclass of GRectangle with the given arguments.
Parameter x: The x-coordinate of the paddle
Precondition: x is a number (int or float)
Parameter bottom: the vertical coordinate of the bottom edge
of the paddle
Precondition: bottom is a number(int or float)
Parameter width: the paddle width
Precondition: width is a number(int or float)>=0
Parameter height:the paddle height
Precondition: width is a number(int or float)>=0
Parameter color: the paddle color
Precondition:color is an RGB object of class colormodel"""
GRectangle.__init__(self, x=x, bottom=bottom, width=width,
height=height, linecolor=color, fillcolor=color)
# METHODS TO MOVE THE PADDLE AND CHECK FOR COLLISIONS
def move(self,press):
"""Moves the paddle left and right in the bounds of the window
Sets left attribute to 0 and right attribute to width of game window
to create boundaries
Parameter: a number added to the x coordinate of the paddle moves
in one key press
Precondition: press is a number(int or float)"""
self.x+=press
if self.left<0:
self.left=0
if self.right>GAME_WIDTH:
self.right=GAME_WIDTH
def collides(self,ball):
"""Returns: True if the ball collides with this brick
Parameter ball: The ball to check
Precondition: ball is of class Ball"""
if ball._vy<0:
return self.contains(ball.x-BALL_RADIUS, ball.y-BALL_RADIUS) or\
self.contains(ball.x-BALL_RADIUS, ball.y+BALL_RADIUS)or\
self.contains(ball.x+BALL_RADIUS, ball.y-BALL_RADIUS) or\
self.contains(ball.x+BALL_RADIUS, ball.y+BALL_RADIUS)
# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
class Brick(GRectangle):
"""An instance is the game paddle.
This class contains a method to detect collision with the ball. You may wish to
add more features to this class.
The attributes of this class are those inherited from GRectangle.
LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
"""
# GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)
# INITIALIZER TO CREATE A BRICK
def __init__(self, left,y, width, height, color):
"""Initializer: creates a GRectangle object as the brick. Brick is a
subclass of GRectangle with the given arguments.
Parameter left: The left edge of the paddle
Precondition: left is a number (int or float)
Parameter y: the vertical coordinate of the paddle
Precondition: bottom is a number(int or float)
Parameter width: the paddle width
Precondition: width is a number(int or float)>=0
Parameter height:the paddle height
Precondition: width is a number(int or float)>=0
Parameter color: the paddle color
Precondition:color is an RGB object of class colormodel"""
GRectangle.__init__(self, left=left, y=y, width=width, height=height, \
linecolor=color, fillcolor=color)
# METHOD TO CHECK FOR COLLISION
def collides(self,ball):
"""Returns: True if the ball collides with this brick
Parameter ball: The ball to check
Precondition: ball is of class Ball"""
return self.contains(ball.x-BALL_RADIUS, ball.y-BALL_RADIUS) or\
self.contains(ball.x-BALL_RADIUS, ball.y+BALL_RADIUS)
# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
class Ball(GEllipse):
"""Instance is a game ball.
We extend GEllipse because a ball must have additional attributes for velocity.
This class adds this attributes and manages them.
INSTANCE ATTRIBUTES:
_vx [int or float]: Velocity in x direction
_vy [int or float]: Velocity in y direction
The class Play will need to look at these attributes, so you will need
getters for them. However, it is possible to write this assignment with no
setters for the velocities.
How? The only time the ball can change velocities is if it hits an obstacle
(paddle or brick) or if it hits a wall. Why not just write methods for these
instead of using setters? This cuts down on the amount of code in Gameplay.
NOTE: The ball does not have to be a GEllipse. It could be an instance
of GImage (why?). This change is allowed, but you must modify the class
header up above.
LIST MORE ATTRIBUTES (AND THEIR INVARIANTS) HERE IF NECESSARY
"""
# GETTERS AND SETTERS (ONLY ADD IF YOU NEED THEM)
def getVX(self):
"""Returns: velocity in x direction of ball"""
return self._vx
def getVY(self):
"""Returns: velocity in y direction of ball"""
return self._vy
def setVY(self, value):
"""Sets vy to value
Parameter value:value is a number(int or float)"""
assert (type(value)==int or type(value)==float)
self._vy=value
# INITIALIZER TO SET RANDOM VELOCITY
def __init__(self, x, y, width, height, color):
"""Initializer: creates a GRectangle object as the ball. Ball is a
subclass of GRectangle with the given arguments for the instance. The
initializer also sets the default values for attributes _vx and _vy.
Parameter x: The x coordinate of the paddle
Precondition: left is a number (int or float)
Parameter y: the y coordinate of the paddle
Precondition: bottom is a number(int or float)
Parameter width: the paddle width
Precondition: width is a number(int or float)>=0
Parameter height:the paddle height
Precondition: width is a number(int or float)>=0
Parameter color: the paddle color
Precondition:color is an RGB object of class colormodel"""
GEllipse.__init__(self, x=x, y=y, width=width, height=height,\
fillcolor=color)
self._vx = random.uniform(1.0,5.0)
self._vx = self._vx * random.choice([-1, 1])
self.setVY(-2.0)
# METHODS TO MOVE AND/OR BOUNCE THE BALL
def step(self):
"""Modifies the x and y attributes of the Ball instance to allow it
to move at random speeds"""
self.x=self.x+self._vx
self.y=self.y+self._vy
def bounce(self):
"""Modifies the _vy and _vx class attributes to be negative when the
ball object hits any of the four corners of the game window."""
if self.y>=GAME_HEIGHT:
self._vy=-self._vy
if self.x>=GAME_WIDTH:
self._vx=-self._vx
if self.x<=0:
self._vx=-self._vx
if self.y<=0:
self._vy=-self._vy
# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
def bottom(self):
"""Returns: True if the y coordinate of the ball passes through the
bottom of the screen; False otherwise
Allows the Ball object to pass through the bottom of the game
window if the player does not catch the ball with the paddle."""
if self.y<=0:
return True
else:
return False
# IF YOU NEED ADDITIONAL MODEL CLASSES, THEY GO HERE
| 40.206897 | 89 | 0.650836 | # models.py
# Michael Huang (mh999), Jeffrey Tsang (jet253)
# December 3rd, 2016
"""Models module for Breakout
This module contains the model classes for the Breakout game. That is anything that you
interact with on the screen is model: the paddle, the ball, and any of the bricks.
Technically, just because something is a model does not mean there has to be a special
class for it. Unless you need something special, both paddle and individual bricks could
just be instances of GRectangle. However, we do need something special: collision
detection. That is why we have custom classes.
You are free to add new models to this module. You may wish to do this when you add
new features to your game. If you are unsure about whether to make a new class or
not, please ask on Piazza."""
import random # To randomly generate the ball velocity
from constants import *
from game2d import *
# PRIMARY RULE: Models are not allowed to access anything except the module constants.py.
# If you need extra information from Play, then it should be a parameter in your method,
# and Play should pass it as a argument when it calls the method.
class Paddle(GRectangle):
    """An instance is the game paddle.

    This class contains a method to detect collision with the ball, as well
    as methods to move it left and right within the window.

    The attributes of this class are those inherited from GRectangle.
    """

    # INITIALIZER TO CREATE A NEW PADDLE
    def __init__(self, x, bottom, width, height, color):
        """Creates a paddle of parent class GRectangle.

        Parameter x: The x-coordinate of the paddle
        Precondition: x is a number (int or float)

        Parameter bottom: the vertical coordinate of the bottom edge
        of the paddle
        Precondition: bottom is a number (int or float)

        Parameter width: the paddle width
        Precondition: width is a number (int or float) >= 0

        Parameter height: the paddle height
        Precondition: height is a number (int or float) >= 0

        Parameter color: the paddle color
        Precondition: color is an RGB object of class colormodel"""
        GRectangle.__init__(self, x=x, bottom=bottom, width=width,
                            height=height, linecolor=color, fillcolor=color)

    # METHODS TO MOVE THE PADDLE AND CHECK FOR COLLISIONS
    def move(self, press):
        """Moves the paddle left and right, clamped to the window bounds.

        Parameter press: offset added to the paddle's x-coordinate per key press
        Precondition: press is a number (int or float)"""
        self.x += press
        if self.left < 0:
            self.left = 0
        if self.right > GAME_WIDTH:
            self.right = GAME_WIDTH

    def collides(self, ball):
        """Returns: True if the ball collides with this paddle, False otherwise.

        Only a downward-moving ball can collide; this prevents the ball from
        bouncing repeatedly while it overlaps the paddle on its way up.
        (Fix: the original returned None instead of False for an upward ball,
        and read the private ball._vy instead of the public getter.)

        Parameter ball: The ball to check
        Precondition: ball is of class Ball"""
        if ball.getVY() >= 0:
            return False
        return (self.contains(ball.x - BALL_RADIUS, ball.y - BALL_RADIUS) or
                self.contains(ball.x - BALL_RADIUS, ball.y + BALL_RADIUS) or
                self.contains(ball.x + BALL_RADIUS, ball.y - BALL_RADIUS) or
                self.contains(ball.x + BALL_RADIUS, ball.y + BALL_RADIUS))

# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
class Brick(GRectangle):
    """An instance is a single breakable brick.

    This class contains a method to detect collision with the ball.

    The attributes of this class are those inherited from GRectangle.
    """

    # INITIALIZER TO CREATE A BRICK
    def __init__(self, left, y, width, height, color):
        """Creates a brick of parent class GRectangle.

        Parameter left: The left edge of the brick
        Precondition: left is a number (int or float)

        Parameter y: the vertical coordinate of the brick
        Precondition: y is a number (int or float)

        Parameter width: the brick width
        Precondition: width is a number (int or float) >= 0

        Parameter height: the brick height
        Precondition: height is a number (int or float) >= 0

        Parameter color: the brick color
        Precondition: color is an RGB object of class colormodel"""
        GRectangle.__init__(self, left=left, y=y, width=width, height=height, \
                            linecolor=color, fillcolor=color)

    # METHOD TO CHECK FOR COLLISION
    def collides(self, ball):
        """Returns: True if the ball collides with this brick, False otherwise.

        Checks all four corners of the ball's bounding box, mirroring
        Paddle.collides.  (Fix: the original only tested the two left-hand
        corners, so contacts on the ball's right side were missed.)

        Parameter ball: The ball to check
        Precondition: ball is of class Ball"""
        return (self.contains(ball.x - BALL_RADIUS, ball.y - BALL_RADIUS) or
                self.contains(ball.x - BALL_RADIUS, ball.y + BALL_RADIUS) or
                self.contains(ball.x + BALL_RADIUS, ball.y - BALL_RADIUS) or
                self.contains(ball.x + BALL_RADIUS, ball.y + BALL_RADIUS))

# ADD MORE METHODS (PROPERLY SPECIFIED) AS NECESSARY
class Ball(GEllipse):
    """Instance is the game ball.

    Extends GEllipse with velocity attributes, which this class manages.

    INSTANCE ATTRIBUTES:
        _vx [int or float]: Velocity in x direction
        _vy [int or float]: Velocity in y direction
    """

    # GETTERS AND SETTERS
    def getVX(self):
        """Returns: velocity in x direction of ball"""
        return self._vx

    def getVY(self):
        """Returns: velocity in y direction of ball"""
        return self._vy

    def setVY(self, value):
        """Sets the vertical velocity to value.

        Parameter value: the new vertical velocity
        Precondition: value is a number (int or float)"""
        assert (type(value) == int or type(value) == float)
        self._vy = value

    # INITIALIZER TO SET RANDOM VELOCITY
    def __init__(self, x, y, width, height, color):
        """Creates a ball of parent class GEllipse.

        The horizontal speed is drawn uniformly from [1.0, 5.0) with a random
        direction; the vertical velocity starts at -2.0 (moving down).

        Parameter x: The x coordinate of the ball
        Precondition: x is a number (int or float)

        Parameter y: the y coordinate of the ball
        Precondition: y is a number (int or float)

        Parameter width: the ball width
        Precondition: width is a number (int or float) >= 0

        Parameter height: the ball height
        Precondition: height is a number (int or float) >= 0

        Parameter color: the ball color
        Precondition: color is an RGB object of class colormodel"""
        GEllipse.__init__(self, x=x, y=y, width=width, height=height,
                          fillcolor=color)
        # Pick the speed first, then the direction (two separate draws).
        speed = random.uniform(1.0, 5.0)
        direction = random.choice([-1, 1])
        self._vx = speed * direction
        self.setVY(-2.0)

    # METHODS TO MOVE AND/OR BOUNCE THE BALL
    def step(self):
        """Advance the ball one frame along its current velocity."""
        self.x += self._vx
        self.y += self._vy

    def bounce(self):
        """Reflect the velocity components when the ball crosses a window edge."""
        if self.y >= GAME_HEIGHT:
            self._vy = -self._vy
        if self.x >= GAME_WIDTH:
            self._vx = -self._vx
        if self.x <= 0:
            self._vx = -self._vx
        if self.y <= 0:
            self._vy = -self._vy

    def bottom(self):
        """Returns: True if the ball has reached the bottom of the window,
        False otherwise.  Used to detect a missed ball."""
        return self.y <= 0
| 0 | 0 | 0 |
7531c0ed47dcaf71bfac59ee5e705618a5bacdbf | 186 | py | Python | mrc/review_form/review_form.py | SN4KEBYTE/mrc | dca4b5d6fec236156bafca904bd7481d04ee3451 | [
"MIT"
] | null | null | null | mrc/review_form/review_form.py | SN4KEBYTE/mrc | dca4b5d6fec236156bafca904bd7481d04ee3451 | [
"MIT"
] | null | null | null | mrc/review_form/review_form.py | SN4KEBYTE/mrc | dca4b5d6fec236156bafca904bd7481d04ee3451 | [
"MIT"
] | null | null | null | from wtforms import Form, TextAreaField, validators
| 31 | 107 | 0.774194 | from wtforms import Form, TextAreaField, validators
class ReviewForm(Form):
    """WTForms form holding a single required free-text movie review of at least 15 characters."""
    movie_review: TextAreaField = TextAreaField('', [validators.DataRequired(), validators.length(min=15)])
543dbe56b8a9f46a7e6cfdb575bff7fc61babb0b | 2,718 | py | Python | tests.py | afcarl/campaign-bot | db82b41b5a764673f60ce744c72d201e702c7a1d | [
"MIT"
] | 1 | 2019-04-22T16:46:06.000Z | 2019-04-22T16:46:06.000Z | tests.py | afcarl/campaign-bot | db82b41b5a764673f60ce744c72d201e702c7a1d | [
"MIT"
] | null | null | null | tests.py | afcarl/campaign-bot | db82b41b5a764673f60ce744c72d201e702c7a1d | [
"MIT"
] | null | null | null | """ super simple tests """
import os
import mail_script
import requests
from jinja2 import Template, Environment, FileSystemLoader
jinja_environment = Environment(autoescape=True,loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))
test_filing = {
'date': 'today',
'committees': [
{
'committee_name': 'test committee',
'committee_id': '123',
'committee_name': 'Lindsay\'s fictional committee',
'filings': [
{
'candidate_name': '',
'file_number': '123456',
'amendment_indicator': 'N',
'report_type': 'F3',
'report_type_full': 'Sept Quarterly',
'total_receipts': '1000',
'total_disbursements': '5000',
'total_independent_expenditures': '23',
'receipt_date': '08/30/2016',
'coverage_start_date': '07/01/2016',
'coverage_end_date': '08/30/2016',
'url': 'www.example.com',
},
{
'candidate_name': 'person',
'file_number': '12346',
'amendment_indicator': 'N',
'report_type': 'F3',
'report_type_full': 'Sept Quarterly',
'total_receipts': '1000',
'total_disbursements': '5000',
'total_independent_expenditures': '23',
'receipt_date': '08/30/2016',
'coverage_start_date': '07/01/2016',
'coverage_end_date': '08/30/2016',
'url': 'www.example.com',
},
]
}
]
}
test_email_render()
test_email(test_filing)
# data = (r['sub_id'], # primary key
# r['committee_id'],
# r['committee_name'],
# r['candidate_name']
# r['file_number'],
# r['amendment_indicator'],
# r['report_type']
# r['report_type_full'],
# r['total_receipts'],
# r['total_disbursements'],
# r['total_independent_expenditures'],
# r['receipt_date'],
# r['coverage_start_date'],
# r['coverage_end_date'],
# r['pages'],
# 'http://docquery.fec.gov/dcdev/posted/{0}.fec'.format(r['file_number']) | 33.975 | 126 | 0.511038 | """ super simple tests """
import os
import mail_script
import requests
from jinja2 import Template, Environment, FileSystemLoader
jinja_environment = Environment(autoescape=True,loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')))
test_filing = {
'date': 'today',
'committees': [
{
'committee_name': 'test committee',
'committee_id': '123',
'committee_name': 'Lindsay\'s fictional committee',
'filings': [
{
'candidate_name': '',
'file_number': '123456',
'amendment_indicator': 'N',
'report_type': 'F3',
'report_type_full': 'Sept Quarterly',
'total_receipts': '1000',
'total_disbursements': '5000',
'total_independent_expenditures': '23',
'receipt_date': '08/30/2016',
'coverage_start_date': '07/01/2016',
'coverage_end_date': '08/30/2016',
'url': 'www.example.com',
},
{
'candidate_name': 'person',
'file_number': '12346',
'amendment_indicator': 'N',
'report_type': 'F3',
'report_type_full': 'Sept Quarterly',
'total_receipts': '1000',
'total_disbursements': '5000',
'total_independent_expenditures': '23',
'receipt_date': '08/30/2016',
'coverage_start_date': '07/01/2016',
'coverage_end_date': '08/30/2016',
'url': 'www.example.com',
},
]
}
]
}
def test_email_render():
    """Render the test template with the module-level fixture and print it for inspection."""
    template = jinja_environment.get_template('test_template.html')
    text = template.render(test_filing)
    print(text)
def test_email(data):
    """Send the filing digest to the admin address.

    NOTE(review): this sends a real email -- it requires working mail
    credentials and the ADMIN_EMAIL environment variable.
    """
    server = mail_script.email_log_in()
    mail_script.mail_update(server, data, os.environ['ADMIN_EMAIL'])
test_email_render()
test_email(test_filing)
# data = (r['sub_id'], # primary key
# r['committee_id'],
# r['committee_name'],
# r['candidate_name']
# r['file_number'],
# r['amendment_indicator'],
# r['report_type']
# r['report_type_full'],
# r['total_receipts'],
# r['total_disbursements'],
# r['total_independent_expenditures'],
# r['receipt_date'],
# r['coverage_start_date'],
# r['coverage_end_date'],
# r['pages'],
# 'http://docquery.fec.gov/dcdev/posted/{0}.fec'.format(r['file_number']) | 236 | 0 | 46 |
9c83e77ead949d28aa78d167f38b9d3cb628f582 | 9,620 | py | Python | zaza/charm_tests/test_utils.py | yoshikado/zaza | b2dbf7aa47b709b79b941dc0a017ce651b9e0cb5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | zaza/charm_tests/test_utils.py | yoshikado/zaza | b2dbf7aa47b709b79b941dc0a017ce651b9e0cb5 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | zaza/charm_tests/test_utils.py | yoshikado/zaza | b2dbf7aa47b709b79b941dc0a017ce651b9e0cb5 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-06-25T09:52:11.000Z | 2019-06-25T09:52:11.000Z | # Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containg base class for implementing charm tests."""
import contextlib
import logging
import unittest
import zaza.model
import zaza.model as model
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.utilities.openstack as openstack_utils
def skipIfNotHA(service_name):
    """Run decorator to skip tests if application not in HA configuration.

    The decorated test runs only when `service_name` has more than one unit
    IP (i.e. is deployed HA); otherwise a warning is logged and the test body
    is skipped.

    :param service_name: Name of the application to check for HA
    :type service_name: str
    """
    # Fix: the chunked source returned _skipIfNotHA_inner_1 without ever
    # defining it; the nested decorator is restored here (per upstream zaza).
    def _skipIfNotHA_inner_1(f):
        def _skipIfNotHA_inner_2(*args, **kwargs):
            ips = zaza.model.get_app_ips(service_name)
            if len(ips) > 1:
                return f(*args, **kwargs)
            else:
                logging.warn("Skipping HA test for non-ha service {}".format(
                    service_name))
        return _skipIfNotHA_inner_2
    return _skipIfNotHA_inner_1
class OpenStackBaseTest(unittest.TestCase):
"""Generic helpers for testing OpenStack API charms."""
    @classmethod
    def setUpClass(cls):
        """Run setup for test class to create common resources.

        Caches on the class: an overcloud keystone session, the active Juju
        model name, the lifecycle charm config, the charm/application name
        and the leader unit name, for use by all tests in the class.
        """
        cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
        cls.model_name = model.get_juju_model()
        cls.test_config = lifecycle_utils.get_charm_config()
        # The application under test is named by the charm test config.
        cls.application_name = cls.test_config['charm_name']
        cls.lead_unit = model.get_lead_unit_name(
            cls.application_name,
            model_name=cls.model_name)
        logging.debug('Leader unit is {}'.format(cls.lead_unit))
@contextlib.contextmanager
def config_change(self, default_config, alternate_config):
"""Run change config tests.
Change config to `alternate_config`, wait for idle workload status,
yield, return config to `default_config` and wait for idle workload
status before return from function.
Example usage:
with self.config_change({'preferred-api-version': '2'},
{'preferred-api-version': '3'}):
do_something()
:param default_config: Dict of charm settings to set on completion
:type default_config: dict
:param alternate_config: Dict of charm settings to change to
:type alternate_config: dict
"""
# we need to compare config values to what is already applied before
# attempting to set them. otherwise the model will behave differently
# than we would expect while waiting for completion of the change
_app_config = model.get_application_config(self.application_name)
app_config = {}
# convert the more elaborate config structure from libjuju to something
# we can compare to what the caller supplies to this function
for k in alternate_config.keys():
# note that conversion to string for all values is due to
# attempting to set any config with other types lead to Traceback
app_config[k] = str(_app_config.get(k, {}).get('value', ''))
if all(item in app_config.items()
for item in alternate_config.items()):
logging.debug('alternate_config equals what is already applied '
'config')
yield
if default_config == alternate_config:
logging.debug('default_config also equals what is already '
'applied config')
return
logging.debug('alternate_config already set, and default_config '
'needs to be applied before return')
else:
logging.debug('Changing charm setting to {}'
.format(alternate_config))
model.set_application_config(
self.application_name,
alternate_config,
model_name=self.model_name)
logging.debug(
'Waiting for units to execute config-changed hook')
model.wait_for_agent_status(model_name=self.model_name)
logging.debug(
'Waiting for units to reach target states')
model.wait_for_application_states(
model_name=self.model_name,
states=self.test_config.get('target_deploy_status', {}))
# TODO: Optimize with a block on a specific application until idle.
model.block_until_all_units_idle()
yield
logging.debug('Restoring charm setting to {}'.format(default_config))
model.set_application_config(
self.application_name,
default_config,
model_name=self.model_name)
logging.debug(
'Waiting for units to reach target states')
model.wait_for_application_states(
model_name=self.model_name,
states=self.test_config.get('target_deploy_status', {}))
# TODO: Optimize with a block on a specific application until idle.
model.block_until_all_units_idle()
def restart_on_changed(self, config_file, default_config, alternate_config,
default_entry, alternate_entry, services):
"""Run restart on change tests.
Test that changing config results in config file being updates and
services restarted. Return config to default_config afterwards
:param config_file: Config file to check for settings
:type config_file: str
:param default_config: Dict of charm settings to set on completion
:type default_config: dict
:param alternate_config: Dict of charm settings to change to
:type alternate_config: dict
:param default_entry: Config file entries that correspond to
default_config
:type default_entry: dict
:param alternate_entry: Config file entries that correspond to
alternate_config
:type alternate_entry: dict
:param services: Services expected to be restarted when config_file is
changed.
:type services: list
"""
# lead_unit is only useed to grab a timestamp, the assumption being
# that all the units times are in sync.
mtime = model.get_unit_time(
self.lead_unit,
model_name=self.model_name)
logging.debug('Remote unit timestamp {}'.format(mtime))
with self.config_change(default_config, alternate_config):
logging.debug(
'Waiting for updates to propagate to {}'.format(config_file))
model.block_until_oslo_config_entries_match(
self.application_name,
config_file,
alternate_entry,
model_name=self.model_name)
# Config update has occured and hooks are idle. Any services should
# have been restarted by now:
logging.debug(
'Waiting for services ({}) to be restarted'.format(services))
model.block_until_services_restarted(
self.application_name,
mtime,
services,
model_name=self.model_name)
logging.debug(
'Waiting for updates to propagate to '.format(config_file))
model.block_until_oslo_config_entries_match(
self.application_name,
config_file,
default_entry,
model_name=self.model_name)
@contextlib.contextmanager
def pause_resume(self, services):
"""Run Pause and resume tests.
Pause and then resume a unit checking that services are in the
required state after each action
:param services: Services expected to be restarted when config_file is
changed.
:type services: list
"""
model.block_until_service_status(
self.lead_unit,
services,
'running',
model_name=self.model_name)
model.block_until_unit_wl_status(
self.lead_unit,
'active',
model_name=self.model_name)
model.run_action(
self.lead_unit,
'pause',
model_name=self.model_name)
model.block_until_unit_wl_status(
self.lead_unit,
'maintenance',
model_name=self.model_name)
model.block_until_all_units_idle(model_name=self.model_name)
model.block_until_service_status(
self.lead_unit,
services,
'stopped',
model_name=self.model_name)
yield
model.run_action(
self.lead_unit,
'resume',
model_name=self.model_name)
model.block_until_unit_wl_status(
self.lead_unit,
'active',
model_name=self.model_name)
model.block_until_all_units_idle(model_name=self.model_name)
model.block_until_service_status(
self.lead_unit,
services,
'running',
model_name=self.model_name)
| 40.251046 | 79 | 0.62474 | # Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containg base class for implementing charm tests."""
import contextlib
import logging
import unittest
import zaza.model
import zaza.model as model
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.utilities.openstack as openstack_utils
def skipIfNotHA(service_name):
    """Run decorator to skip tests if application not in HA configuration.

    :param service_name: Name of the application to check for multiple units.
    :type service_name: str
    :returns: A decorator that runs the wrapped test only when the
              application has more than one unit (i.e. is HA).
    """
    def _skipIfNotHA_inner_1(f):
        # Decorator applied to the test function itself.
        def _skipIfNotHA_inner_2(*args, **kwargs):
            # An application with more than one unit is considered HA.
            ips = zaza.model.get_app_ips(
                service_name)
            if len(ips) > 1:
                return f(*args, **kwargs)
            else:
                # Bug fix: logging.warn is a deprecated alias of warning.
                logging.warning("Skipping HA test for non-ha service {}".format(
                    service_name))
        return _skipIfNotHA_inner_2
    return _skipIfNotHA_inner_1
class OpenStackBaseTest(unittest.TestCase):
    """Generic helpers for testing OpenStack API charms."""

    @classmethod
    def setUpClass(cls):
        """Run setup for test class to create common resources."""
        cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
        cls.model_name = model.get_juju_model()
        cls.test_config = lifecycle_utils.get_charm_config()
        cls.application_name = cls.test_config['charm_name']
        cls.lead_unit = model.get_lead_unit_name(
            cls.application_name,
            model_name=cls.model_name)
        logging.debug('Leader unit is {}'.format(cls.lead_unit))

    @contextlib.contextmanager
    def config_change(self, default_config, alternate_config):
        """Run change config tests.

        Change config to `alternate_config`, wait for idle workload status,
        yield, return config to `default_config` and wait for idle workload
        status before return from function.

        Example usage:
            with self.config_change({'preferred-api-version': '2'},
                                    {'preferred-api-version': '3'}):
                do_something()

        :param default_config: Dict of charm settings to set on completion
        :type default_config: dict
        :param alternate_config: Dict of charm settings to change to
        :type alternate_config: dict
        """
        # we need to compare config values to what is already applied before
        # attempting to set them. otherwise the model will behave differently
        # than we would expect while waiting for completion of the change
        _app_config = model.get_application_config(self.application_name)
        app_config = {}
        # convert the more elaborate config structure from libjuju to something
        # we can compare to what the caller supplies to this function
        for k in alternate_config.keys():
            # note that conversion to string for all values is due to
            # attempting to set any config with other types lead to Traceback
            app_config[k] = str(_app_config.get(k, {}).get('value', ''))
        if all(item in app_config.items()
               for item in alternate_config.items()):
            logging.debug('alternate_config equals what is already applied '
                          'config')
            yield
            if default_config == alternate_config:
                logging.debug('default_config also equals what is already '
                              'applied config')
                return
            logging.debug('alternate_config already set, and default_config '
                          'needs to be applied before return')
        else:
            logging.debug('Changing charm setting to {}'
                          .format(alternate_config))
            model.set_application_config(
                self.application_name,
                alternate_config,
                model_name=self.model_name)
            logging.debug(
                'Waiting for units to execute config-changed hook')
            model.wait_for_agent_status(model_name=self.model_name)
            logging.debug(
                'Waiting for units to reach target states')
            model.wait_for_application_states(
                model_name=self.model_name,
                states=self.test_config.get('target_deploy_status', {}))
            # TODO: Optimize with a block on a specific application until idle.
            model.block_until_all_units_idle()
            yield
        logging.debug('Restoring charm setting to {}'.format(default_config))
        model.set_application_config(
            self.application_name,
            default_config,
            model_name=self.model_name)
        logging.debug(
            'Waiting for units to reach target states')
        model.wait_for_application_states(
            model_name=self.model_name,
            states=self.test_config.get('target_deploy_status', {}))
        # TODO: Optimize with a block on a specific application until idle.
        model.block_until_all_units_idle()

    def restart_on_changed(self, config_file, default_config, alternate_config,
                           default_entry, alternate_entry, services):
        """Run restart on change tests.

        Test that changing config results in config file being updated and
        services restarted. Return config to default_config afterwards.

        :param config_file: Config file to check for settings
        :type config_file: str
        :param default_config: Dict of charm settings to set on completion
        :type default_config: dict
        :param alternate_config: Dict of charm settings to change to
        :type alternate_config: dict
        :param default_entry: Config file entries that correspond to
                              default_config
        :type default_entry: dict
        :param alternate_entry: Config file entries that correspond to
                                alternate_config
        :type alternate_entry: dict
        :param services: Services expected to be restarted when config_file is
                         changed.
        :type services: list
        """
        # lead_unit is only used to grab a timestamp, the assumption being
        # that all the units' times are in sync.
        mtime = model.get_unit_time(
            self.lead_unit,
            model_name=self.model_name)
        logging.debug('Remote unit timestamp {}'.format(mtime))
        with self.config_change(default_config, alternate_config):
            logging.debug(
                'Waiting for updates to propagate to {}'.format(config_file))
            model.block_until_oslo_config_entries_match(
                self.application_name,
                config_file,
                alternate_entry,
                model_name=self.model_name)
            # Config update has occurred and hooks are idle. Any services
            # should have been restarted by now:
            logging.debug(
                'Waiting for services ({}) to be restarted'.format(services))
            model.block_until_services_restarted(
                self.application_name,
                mtime,
                services,
                model_name=self.model_name)
        # Bug fix: this format string previously had no '{}' placeholder,
        # so the config file name was silently dropped from the log message.
        logging.debug(
            'Waiting for updates to propagate to {}'.format(config_file))
        model.block_until_oslo_config_entries_match(
            self.application_name,
            config_file,
            default_entry,
            model_name=self.model_name)

    @contextlib.contextmanager
    def pause_resume(self, services):
        """Run pause and resume tests.

        Pause and then resume a unit checking that services are in the
        required state after each action.

        :param services: Services expected to be stopped by the 'pause'
                         action and running again after 'resume'.
        :type services: list
        """
        # Preconditions: services running and workload status active.
        model.block_until_service_status(
            self.lead_unit,
            services,
            'running',
            model_name=self.model_name)
        model.block_until_unit_wl_status(
            self.lead_unit,
            'active',
            model_name=self.model_name)
        model.run_action(
            self.lead_unit,
            'pause',
            model_name=self.model_name)
        model.block_until_unit_wl_status(
            self.lead_unit,
            'maintenance',
            model_name=self.model_name)
        model.block_until_all_units_idle(model_name=self.model_name)
        model.block_until_service_status(
            self.lead_unit,
            services,
            'stopped',
            model_name=self.model_name)
        yield
        model.run_action(
            self.lead_unit,
            'resume',
            model_name=self.model_name)
        model.block_until_unit_wl_status(
            self.lead_unit,
            'active',
            model_name=self.model_name)
        model.block_until_all_units_idle(model_name=self.model_name)
        model.block_until_service_status(
            self.lead_unit,
            services,
            'running',
            model_name=self.model_name)
| 368 | 0 | 26 |
cd390bdaf658d6bcec6d6bdee90a2f489a11710d | 2,457 | py | Python | core/highlight/screen_highlight.py | echim/pySteps | c33ac3446593b545aece475062d140527dcb443c | [
"MIT"
] | 8 | 2018-05-15T21:20:40.000Z | 2021-08-19T00:25:18.000Z | core/highlight/screen_highlight.py | echim/pySteps | c33ac3446593b545aece475062d140527dcb443c | [
"MIT"
] | null | null | null | core/highlight/screen_highlight.py | echim/pySteps | c33ac3446593b545aece475062d140527dcb443c | [
"MIT"
] | 2 | 2018-09-12T01:33:54.000Z | 2021-01-25T02:21:58.000Z | from tkinter import *
from core.helpers.os_helpers import platform_is_windows, platform_is_linux, platform_is_darwin
from core.highlight.highlight_circle import HighlightCircle
from core.highlight.highlight_rectangle import HighlightRectangle
| 34.605634 | 115 | 0.582418 | from tkinter import *
from core.helpers.os_helpers import platform_is_windows, platform_is_linux, platform_is_darwin
from core.highlight.highlight_circle import HighlightCircle
from core.highlight.highlight_rectangle import HighlightRectangle
def _draw_circle(self, x, y, r, **kwargs):
return self.create_oval(x - r, y - r, x + r, y + r, **kwargs)
def _draw_rectangle(self, x, y, w, h, **kwargs):
rectangle = self.create_rectangle(0, 0, w, h, **kwargs)
self.move(rectangle, x, y)
class ScreenHighlight:
    """Full-screen transparent Tk overlay used to draw highlight shapes.

    Creates a borderless window covering the whole screen, makes its white
    background transparent (per-platform tricks below) and exposes helpers
    to draw HighlightCircle/HighlightRectangle objects on it.
    """
    def draw_circle(self, a_circle: HighlightCircle):
        """Draw *a_circle* on the overlay; returns the canvas item id."""
        return self.canvas.draw_circle(a_circle.center.x,
                                       a_circle.center.y,
                                       a_circle.radius,
                                       outline=a_circle.color.value,
                                       width=a_circle.thickness)
    def draw_rectangle(self, a_rectangle: HighlightRectangle):
        """Draw *a_rectangle* on the overlay.

        NOTE(review): _draw_rectangle has no return statement, so this
        currently returns None (unlike draw_circle) - confirm intent.
        """
        return self.canvas.draw_rectangle(a_rectangle.start_point.x,
                                          a_rectangle.start_point.y,
                                          a_rectangle.width,
                                          a_rectangle.height,
                                          outline=a_rectangle.color.value,
                                          width=a_rectangle.thickness)
    def quit(self):
        """Stop the Tk mainloop and destroy the overlay window."""
        self.root.quit()
        self.root.destroy()
    def render(self, for_ms):
        """Show the overlay for *for_ms* milliseconds, then tear it down."""
        self.root.after(for_ms, self.quit)
        self.root.mainloop()
    def __init__(self):
        self.root = Tk()
        s_width = self.root.winfo_screenwidth()
        s_height = self.root.winfo_screenheight()
        # On macOS overrideredirect breaks transparency, so keep the window
        # decorated but always on top; elsewhere strip the decorations.
        if platform_is_darwin():
            self.root.wm_attributes('-topmost', True)
        else:
            self.root.overrideredirect(1)
        canvas = Canvas(self.root, width=s_width, height=s_height, borderwidth=0, highlightthickness=0, bg="white")
        canvas.grid()
        # Monkey-patch the drawing helpers onto the Canvas class.
        Canvas.draw_circle = _draw_circle
        Canvas.draw_rectangle = _draw_rectangle
        # Platform-specific transparency: Windows keys out the white bg,
        # Linux uses whole-window alpha, macOS uses a transparent colour.
        if platform_is_windows():
            self.root.wm_attributes("-transparentcolor", "white")
        if platform_is_linux():
            self.root.wait_visibility(self.root)
            self.root.attributes('-alpha', 0.9)
        if platform_is_darwin():
            self.root.wm_attributes('-transparent', True)
            canvas.config(bg='systemTransparent')
        self.canvas = canvas
1d6b28dc555c9d1e03c38c6cb14e0dd32cebdbc6 | 2,425 | py | Python | greedy best first search/gbfs.py | ehabosaleh/heuristic-maze-traversing | 6583ec2b4d7beca2af544185f5d34710213b7790 | [
"MIT"
] | null | null | null | greedy best first search/gbfs.py | ehabosaleh/heuristic-maze-traversing | 6583ec2b4d7beca2af544185f5d34710213b7790 | [
"MIT"
] | null | null | null | greedy best first search/gbfs.py | ehabosaleh/heuristic-maze-traversing | 6583ec2b4d7beca2af544185f5d34710213b7790 | [
"MIT"
] | null | null | null | from collections import deque
| 44.907407 | 181 | 0.520825 | from collections import deque
class GBFS:
    """Greedy best-first search over a graph described by class attributes.

    ``tree`` maps a node to its list of neighbours and ``cost`` maps a node
    to its heuristic value; callers populate both before calling gbfs().
    NOTE(review): these are mutable CLASS attributes, so state is shared
    across all instances and across repeated searches - confirm intended.
    """
    optimal_path=[]
    shortest_path=[]
    tree={}
    cost={}
    pos=0
    path={}
    def gbfs(self,src_node,dest_node):
        """Search from src_node to dest_node, always expanding the frontier
        node with the smallest heuristic cost; prints the result."""
        queue=[]
        self.path=[]
        total_cost=[self.cost[src_node]]
        found=False
        queue.append(src_node)
        self.optimal_path.append([src_node])
        keys=list(self.tree)
        # queue and total_cost are kept in lockstep: total_cost[i] is the
        # heuristic cost of queue[i]; the cheapest entry is expanded next.
        while(len(queue)!=0):
            nominated_node=queue[total_cost.index(min(total_cost))]
            lower_cost=min(total_cost)
            if nominated_node==dest_node:
                found=True
                # NOTE(review): the local 'pos' used after the loop is only
                # bound here; it shadows the class attribute 'pos' - confirm.
                pos=total_cost.index(min(total_cost))
                self.path.append(queue.pop(queue.index(nominated_node)))
                break
            elif nominated_node not in keys :
                # Leaf node (no children recorded): drop it from the frontier.
                self.path.append(queue.pop(queue.index(nominated_node)))
                self.optimal_path.pop(total_cost.index(min(total_cost)))
                total_cost.pop(total_cost.index(min(total_cost)))
            elif nominated_node in keys:
                # Expand: enqueue unseen children with their heuristic cost.
                V=nominated_node
                self.path.append(queue.pop(queue.index(nominated_node)))
                p=self.optimal_path[total_cost.index(min(total_cost))]
                self.optimal_path.pop(total_cost.index(min(total_cost)))
                total_cost.pop(total_cost.index(min(total_cost)))
                for l in range(len(self.tree[V])):
                    if self.tree[V][l] not in self.path and self.tree[V][l] not in queue:
                        queue.append(self.tree[V][l])
                        total_cost.append(self.cost[ self.tree[V][l]])
                        self.optimal_path.append(p+[self.tree[V][l]])
        if found==True:
            self.shortest_path=self.optimal_path[pos]
            print("GBFS:Optimal path from {} to {} is {}\n Path was found after traversing all of these nodes:\n {}".format(src_node,dest_node,self.optimal_path[pos],set(self.path)))
            print('-'*50)
        elif found==False:
            # NOTE(review): message says "UCS" but this is the GBFS class -
            # looks copy-pasted from a UCS implementation; confirm.
            print("Target {} is NOT reachable from source {} using UCS".format(dest_node,src_node))
            print('-'*50)
        # Reset the shared frontier bookkeeping, keeping only the result.
        self.optimal_path=[[self.shortest_path]]
        del queue
| 2,268 | 105 | 22 |
9ed3b9df069b19db2b97f052b0de3f3663c36562 | 1,745 | py | Python | py/torch_tensorrt/fx/test/trt_lower/test_observer_gpu.py | hassan11196/Torch-TensorRT | a2d0d0e935bf223523a7c28d7814cdbd32f323b2 | [
"BSD-3-Clause"
] | 430 | 2021-11-09T08:08:01.000Z | 2022-03-31T10:13:45.000Z | py/torch_tensorrt/fx/test/trt_lower/test_observer_gpu.py | NVIDIA/Torch-TensorRT | 1a22204fecec690bc3c2a318dab4f57b98c57f05 | [
"BSD-3-Clause"
] | 257 | 2021-11-09T07:17:03.000Z | 2022-03-31T20:29:31.000Z | py/torch_tensorrt/fx/test/trt_lower/test_observer_gpu.py | hassan11196/Torch-TensorRT | a2d0d0e935bf223523a7c28d7814cdbd32f323b2 | [
"BSD-3-Clause"
] | 68 | 2021-11-10T05:03:22.000Z | 2022-03-22T17:07:32.000Z | # Owner(s): ["oncall: gpu_enablement"]
import functools
from unittest import TestCase
import torch_tensorrt.fx.observer as ob
from test_observer import execution_verifier, set_observer_callback_rethrow
from torch_tensorrt.fx.passes.lower_basic_pass import fuse_permute_linear
| 32.314815 | 78 | 0.629226 | # Owner(s): ["oncall: gpu_enablement"]
import functools
from unittest import TestCase
import torch_tensorrt.fx.observer as ob
from test_observer import execution_verifier, set_observer_callback_rethrow
from torch_tensorrt.fx.passes.lower_basic_pass import fuse_permute_linear
class ObserverGPUTests(TestCase):
    """GPU tests for the observer hooks in torch_tensorrt's lowering pass."""
    @set_observer_callback_rethrow
    def test_observe_lowerer(self):
        """
        Test that we can observe the execution of `fuse_permute_linear` during
        lowering.
        """
        # Imports are local because they require torch / torch_tensorrt and
        # a CUDA device, which may not be present at collection time.
        from dataclasses import replace
        import torch
        import torch.nn as nn
        import torch_tensorrt.fx.lower as lower
        from torch_tensorrt.fx.lower_setting import LowerSetting
        class Model(nn.Module):
            """Trivial two-input module used as the lowering target."""
            def forward(self, x, y):
                return x + y
        mod = Model().cuda()
        inp = [torch.rand(1, 10), torch.rand(1, 10)]
        inp = [i.cuda() for i in inp]
        # Warm-up forward pass before lowering.
        mod(*inp)
        with execution_verifier() as verify_execution:
            # min_acc_module_size=0 so even this tiny module gets lowered.
            lowerer = lower.Lowerer.create(
                lower_setting=LowerSetting(min_acc_module_size=0)
            )
            @verify_execution
            def observe_fuse_permute_linear_post(ctx: ob.ObserveContext):
                """
                Called when fuse_permute_linear is executed. Decorated with
                `verify_execution` so if this function is not executed, the
                test fails.
                """
                assert ctx.callable is fuse_permute_linear.orig_func
            # Register the observer callback and do the lowering
            with fuse_permute_linear.observers.post.add(
                observe_fuse_permute_linear_post
            ):
                lowerer(mod, inp)
| 32 | 1,412 | 23 |
353e36f4f35b597ef0dcf8b5ab98d766ea5a7e74 | 13,078 | py | Python | BCL2FASTQPostprocessor.py | EdinburghGenomics/illuminatus | c92d6448675fb451f2dc284d67db1711f2974c74 | [
"BSD-2-Clause"
] | 1 | 2020-02-15T00:18:51.000Z | 2020-02-15T00:18:51.000Z | BCL2FASTQPostprocessor.py | EdinburghGenomics/illuminatus | c92d6448675fb451f2dc284d67db1711f2974c74 | [
"BSD-2-Clause"
] | 1 | 2021-03-30T10:50:38.000Z | 2021-03-30T10:50:39.000Z | BCL2FASTQPostprocessor.py | EdinburghGenomics/illuminatus | c92d6448675fb451f2dc284d67db1711f2974c74 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
""" "Fixes" the output of bcl2fastq to meet our requirements.
Files are renamed, grouped by pool, and shifted out of the demultiplexing directory.
projects_ready.txt is added listing the projects found
projects_pending.txt is deleted if it exists
"""
# I guess we could go back to keeping the files in /ifs/runqc until they are renamed,
# and this might be sensible for backup purposes. In any case I could do this with a
# symlink so the code can stay the same.
import os, sys, re, time
from glob import glob
import yaml
from collections import namedtuple
# Global error collector
ERRORS = set()
def main(output_dir, prefix=None):
    """ Usage BCL2FASTQPostprocessor.py <run_dir> [prefix]
    """
    output_dir = os.path.abspath(output_dir)
    #The prefix is normally the run name ie. the folder name, but driver.sh
    #will set this explicitly based on RunInfo.
    if not prefix:
        prefix = os.path.basename(output_dir)
    #All renames need to be logged. The log wants to live in the demultiplexing/
    #subdirectory.
    demux_dir = output_dir + "/demultiplexing"
    with open(os.path.join(demux_dir, 'renames.log'), 'a') as log_fh:
        # Bug fix: this nested helper was missing, so every log(...) call
        # below raised NameError. It appends one line to renames.log.
        def log(m): print(m, file=log_fh)
        log("# %s" % sys.argv[0])
        log("# renaming files in %s on %s" % (
            demux_dir,
            time.strftime('%Y-%m-%d %H:%M', time.localtime()) ))
        project_seen = do_renames(output_dir, prefix, log=log)
        if ERRORS:
            # do_renames collected problems in the global ERRORS set;
            # report them and do NOT mark any projects as ready.
            log("# There were errors...")
            for e in ERRORS:
                print("Error: %s" % e)
                log("# %s" % e)
        else:
            save_projects_ready(output_dir, project_seen)
            log("# DONE. And projects_ready.txt was saved out.")
def save_projects_ready(output_dir, proj_seen):
    """Save out what we've processed. There might be stuff already in projects_ready.txt
       and we want to maintain the contents as a sorted set (as per 'sort -u')
    """
    ready_file = os.path.join(output_dir, 'projects_ready.txt')
    # Merge in any projects recorded by a previous run; a missing file
    # simply means there were none.
    try:
        with open(ready_file) as fh:
            proj_seen.update(line.strip() for line in fh)
    except FileNotFoundError:
        pass
    # Rewrite the file as a sorted, de-duplicated list. Only projects that
    # still have a directory are listed - this catches the case where a bad
    # project name was in the sample sheet and its files were flushed.
    with open(ready_file, 'w') as fh:
        for proj in sorted(proj_seen):
            if os.path.isdir(os.path.join(output_dir, proj)):
                print(proj, file=fh)
    # And delete projects_pending.txt. It probably doesn't exist, which is fine.
    try:
        os.unlink(os.path.join(output_dir, 'projects_pending.txt'))
    except FileNotFoundError:
        pass
def check_project_name(proj_name):
    """ BCL2FASTQ is already quite fussy about project names.
        This will just chack that the project name isn't going to clobber any
        of our folders.
    """
    reserved = "counts demultiplexing md5sums multiqc_reports QC seqdata slurm_output".split()
    if "." in proj_name:
        raise ValueError("Invalid project name {!r} contains a period.".format(proj_name))
    if proj_name in reserved:
        raise ValueError("Invalid project name {!r} conflicts with reserved names.".format(proj_name))
def do_renames(output_dir, runid, log = lambda m: print(m)):
    """ The main part of the code that does the renaming (moving).
        Primary reason for splitting this out from main() is to separate
        the sys.argv processing and the log file handling in order to
        simplify unit testing.
        Returns the set of projects for which files have been renamed.

        :param output_dir: absolute path of the run output directory
        :param runid: prefix for all renamed files (normally the run name)
        :param log: callable taking one message string (defaults to print)
    """
    proj_seen = set()

    def add_project(proj):
        """Note *proj* as one of the projects we've processed.

           NOTE(review): this nested helper was lost from this copy of the
           file and has been reconstructed; the original may also have
           validated the name via check_project_name() - confirm.
        """
        proj_seen.add(proj)

    def translate_read_number(f, fastq_set):
        """Return the read-number label for the output filename.

           Per the UMI note below: when a read 3 exists for the same
           lane/project/pool, read 2 is the UMI read (labelled 'UMI') and
           read 3 is renamed to '2'. Otherwise the number passes through.
           NOTE(review): reconstructed helper - confirm against original.
        """
        if f.readnumber in ('2', '3'):
            if any(o.readnumber == '3'
                   and o.lane == f.lane
                   and o.project == f.project
                   and o.pool_and_library == f.pool_and_library
                   for o in fastq_set):
                return 'UMI' if f.readnumber == '2' else '2'
        return f.readnumber

    # Previously we scanned for *.fastq.gz files, but it's more sensible to look for an explicit
    # list of projects. The projects don't get listed in Stats.json, so go back to sample_summary.yml
    # directly. This allows us to proceed even when no files were produced (ie. all the barcodes are wrong)
    try:
        with open( os.path.join( output_dir, "seqdata/pipeline" , "sample_summary.yml" ) ) as sfh:
            summary = yaml.safe_load(sfh)
            for proj in summary['ProjectInfo']:
                add_project(proj)
    except FileNotFoundError:
        log("Failed to read seqdata/pipeline/sample_summary.yml. Proceeding anyway.")

    # Some funny-business with UMI reads. These come out as read 2 but we actually want to rename them
    # to _UMI and rename the _3 read as _2. For this reason, gather up the file list first.
    afile = namedtuple("afile", "samplename lane readnumber project pool_and_library".split())
    all_fastq = set()
    afile_to_filename = dict()

    # Notwithstanding the list of projects obtained by the summary, look for fastq.gz files in all
    # locations. Either we have a list of projects and will find corresponding fastq, or else we
    # have no list and will make it up as we go along.
    for fastq_file in glob(os.path.join( output_dir, "demultiplexing/lane*" , "*/*/*.fastq.gz" )):
        # something like: 10528, 10528EJ0019L01, 10528EJpool03_S19_L005_R1_001.fastq.gz
        lane_dir, project, pool_and_library, filename = fastq_file.split('/')[-4:]
        #Note the project as one we've processed.
        add_project(project)
        # get information from the filename
        re_match = re.match( r'(.*)_(S[0-9]+)_L00(\d)_R(\d)_\d+.fastq.gz', filename, re.I)
        if not re_match:
            log("# skipping (regex mismatch) %s" % fastq_file)
            continue
        samplename = re_match.group(1) # recorded but otherwise ignored
        lane = re_match.group(3)       # e.g.: L00(5)
        readnumber = re_match.group(4) # e.g.: R(1)
        # Check lane matches the directory name
        if not lane_dir == 'lane{}'.format(lane):
            log("# skipping (lane mismatch) %s" % fastq_file)
            continue
        # Add this to the collection
        thisfile = afile( samplename = samplename,
                          lane = lane,
                          readnumber = readnumber,
                          project = project,
                          pool_and_library = pool_and_library )
        all_fastq.add(thisfile)
        afile_to_filename[thisfile] = fastq_file

    # Now go again for files not in a subdirectory (if Sample_Name was blank)
    for fastq_file in glob(os.path.join( output_dir, "demultiplexing/lane*" , "*/*.fastq.gz" )):
        lane_dir, project, filename = fastq_file.split('/')[-3:]
        add_project(project)
        # Note this ignores index reads.
        re_match = re.match( r'(.*)_(S[0-9]+)_L00(\d)_R(\d)_\d+.fastq.gz', filename, re.I)
        if not re_match:
            log("# skipping (regex mismatch) %s" % fastq_file)
            continue
        pool_and_library = re_match.group(1) # e.g.: 10528EJpool03__10528EJ0019L01
        lane = re_match.group(3)
        readnumber = re_match.group(4)
        if not lane_dir == 'lane{}'.format(lane):
            log("# skipping (lane mismatch) %s" % fastq_file)
            continue
        thisfile = afile( samplename = '',
                          lane = lane,
                          readnumber = readnumber,
                          project = project,
                          pool_and_library = pool_and_library )
        all_fastq.add(thisfile)
        afile_to_filename[thisfile] = fastq_file

    for f in all_fastq:
        fastq_file = afile_to_filename[f]
        readnumber = translate_read_number(f, all_fastq)
        # split out library and pool
        try:
            pool, library = f.pool_and_library.split('__')
        except ValueError:
            # Be a little less strict here. This is also needed for PhiX.
            pool = 'NoPool'
            # Bug fix: this previously read the stale loop variable
            # 'pool_and_library' left over from the glob loops above,
            # not the record currently being processed.
            library = f.pool_and_library
        new_filename = "{runid}_{f.lane}_{library}_{readnumber}.fastq.gz".format(**locals())
        new_filename_relative = os.path.join ( f.project, pool, new_filename )
        new_filename_absolute = os.path.join ( output_dir, new_filename_relative )
        #Make the directory to put it in
        os.makedirs(os.path.dirname(new_filename_absolute), exist_ok=True)
        #Paranoia. Rather than checking if the file exists, create it exclusively.
        #That way, no possible race condition that can cause one file to be renamed over
        #another file (ignoring remote NFS race conditions).
        try:
            log( "mv %s %s" % ('/'.join(fastq_file.split('/')[-4:]), new_filename_relative) )
            with open(new_filename_absolute, 'x') as tmp_fd:
                os.replace(fastq_file, new_filename_absolute)
        except FileExistsError:
            log("# FileExistsError renaming %s" % new_filename_relative)
            raise

    # Now deal with the undetermined files.
    undet_fastq = set()
    for undet_file_absolute in glob(os.path.join( output_dir, "demultiplexing/lane*", "[Uu]ndetermined_*" )):
        lane_dir, filename = undet_file_absolute.split('/')[-2:]
        # eg. Undetermined_S0_L004_R1_001.fastq.gz
        re_match = re.match( r'undetermined_(.*)_L00(\d)_R(\d)_\d+.fastq.gz', filename, re.I)
        if not re_match:
            # Bug fix: previously logged the stale 'fastq_file' variable
            # from the loops above, not the file actually being skipped.
            log("# skipping %s" % undet_file_absolute)
            continue
        lane = re_match.group(2)
        readnumber = re_match.group(3)
        # Check lane matches the directory name
        if not lane_dir == 'lane{}'.format(lane):
            log("# skipping (lane mismatch) %s" % undet_file_absolute)
            continue
        thisfile = afile( samplename = 'undetermined',
                          lane = lane,
                          readnumber = readnumber,
                          project = '',
                          pool_and_library = '' )
        undet_fastq.add(thisfile)
        afile_to_filename[thisfile] = undet_file_absolute

    # And process the set we just collected
    for f in undet_fastq:
        fastq_file = afile_to_filename[f]
        readnumber = translate_read_number(f, undet_fastq)
        # eg. 160811_D00261_0355_BC9DA7ANXX_4_unassigned_1.fastq.gz
        new_filename = "{runid}_{f.lane}_unassigned_{readnumber}.fastq.gz".format(**locals())
        new_filename_absolute = os.path.join ( output_dir, new_filename )
        # Bug fix: 'filename' was stale from the glob loop above; derive it
        # from the file actually being renamed.
        filename = os.path.basename(fastq_file)
        #See comment above
        try:
            log( "mv %s %s" % ( os.path.join("demultiplexing", filename), new_filename) )
            with open(new_filename_absolute, 'x') as tmp_fd:
                os.rename(fastq_file, new_filename_absolute)
        except FileExistsError:
            log("# FileExistsError renaming %s" % new_filename)
            raise

    # Cleanup empty project directories (as per Cleanup.py) then warn if any dirs
    # remain (or, if fact, that's an error).
    for lane_dir in glob(os.path.join(output_dir, "demultiplexing", "lane*")):
        for proj in list(proj_seen):
            for root, dirs, files in os.walk(
                            os.path.join(lane_dir, proj),
                            topdown=False ):
                try:
                    os.rmdir(root)
                    log("rmdir '%s'" % root)
                except Exception:
                    # Assume it was non-empty.
                    ERRORS.add("Failed to remove all project directories from demultiplexing area.")
                    log("# could not remove dir '%s'" % root)
                    # And we cannot say the project is ready.
                    proj_seen.discard(proj)

    # Finally return the projects processed
    return proj_seen
if __name__ == '__main__':
    # Echo the command line for the benefit of captured logs.
    print("Running: " + ' '.join(sys.argv))
    main(*sys.argv[1:])
    # Exit non-zero if any errors were collected in the global ERRORS set.
    if ERRORS: exit(1)
| 41.125786 | 109 | 0.61332 | #!/usr/bin/env python3
""" "Fixes" the output of bcl2fastq to meet our requirements.
Files are renamed, grouped by pool, and shifted out of the demultiplexing directory.
projects_ready.txt is added listing the projects found
projects_pending.txt is deleted if it exists
"""
# I guess we could go back to keeping the files in /ifs/runqc until they are renamed,
# and this might be sensible for backup purposes. In any case I could do this with a
# symlink so the code can stay the same.
import os, sys, re, time
from glob import glob
import yaml
from collections import namedtuple
# Global error collector
ERRORS = set()
def main(output_dir, prefix=None):
    """ Usage BCL2FASTQPostprocessor.py <run_dir> [prefix]
    """
    output_dir = os.path.abspath(output_dir)
    #The prefix is normally the run name ie. the folder name, but driver.sh
    #will set this explicitly based on RunInfo.
    if not prefix:
        prefix = os.path.basename(output_dir)
    #All renames need to be logged. The log wants to live in the demultiplexing/
    #subdirectory.
    demux_dir = output_dir + "/demultiplexing"
    with open(os.path.join(demux_dir, 'renames.log'), 'a') as log_fh:
        # Helper that appends one line to renames.log.
        def log(m): print(m, file=log_fh)
        log("# %s" % sys.argv[0])
        log("# renaming files in %s on %s" % (
            demux_dir,
            time.strftime('%Y-%m-%d %H:%M', time.localtime()) ))
        project_seen = do_renames(output_dir, prefix, log=log)
        # do_renames collects problems in the global ERRORS set; if any
        # occurred we report them and do NOT mark projects as ready.
        if ERRORS:
            log("# There were errors...")
            for e in ERRORS:
                print("Error: %s" % e)
                log("# %s" % e)
        else:
            save_projects_ready(output_dir, project_seen)
            log("# DONE. And projects_ready.txt was saved out.")
def save_projects_ready(output_dir, proj_seen):
    """Persist the set of processed projects to projects_ready.txt.

    Entries already recorded in projects_ready.txt are merged into
    *proj_seen* (the set is mutated in place), then the file is rewritten
    as a sorted, de-duplicated list -- equivalent to 'sort -u'.  Only
    projects that actually have a directory under *output_dir* are kept;
    this drops projects whose files were completely flushed after a bad
    sample sheet was corrected.  Finally projects_pending.txt is removed
    if it exists.
    """
    ready_path = os.path.join(output_dir, 'projects_ready.txt')
    # Merge in anything recorded by a previous run of this script.
    try:
        with open(ready_path) as fh:
            proj_seen.update(line.strip() for line in fh)
    except FileNotFoundError:
        pass  # No previous file -- nothing to merge.
    # Rewrite the file as a sorted unique list, keeping only projects for
    # which a directory really exists under output_dir.
    with open(ready_path, 'w') as fh:
        for project in sorted(proj_seen):
            if os.path.isdir(os.path.join(output_dir, project)):
                print(project, file=fh)
    # projects_pending.txt is now obsolete.  It usually doesn't exist.
    try:
        os.unlink(os.path.join(output_dir, 'projects_pending.txt'))
    except FileNotFoundError:
        pass
def check_project_name(proj_name):
    """Reject project names that would collide with our own directories.

    BCL2FASTQ already polices project names quite strictly; this adds two
    extra checks: names must not contain a period, and must not clash
    with the reserved top-level directory names this pipeline creates.
    Raises ValueError on a bad name; returns None otherwise.
    """
    reserved_names = "counts demultiplexing md5sums multiqc_reports QC seqdata slurm_output".split()
    if "." in proj_name:
        raise ValueError("Invalid project name {!r} contains a period.".format(proj_name))
    if proj_name in reserved_names:
        raise ValueError("Invalid project name {!r} conflicts with reserved names.".format(proj_name))
def do_renames(output_dir, runid, log = lambda m: print(m)):
    """ The main part of the code that does the renaming (moving).
        Primary reason for splitting this out from main() is to separate
        the sys.argv processing and the log file handling in order to
        simplify unit testing.
        Returns the set of projects for which files have been renamed.

        output_dir -- absolute path to the run's output directory
        runid      -- prefix for the renamed files (normally the run name)
        log        -- callable taking one string; every rename is logged
    """
    proj_seen = set()
    def add_project(proj_name):
        # Validate and record a project name.  This helper is re-bound
        # below once the official project list has been loaded.
        check_project_name(proj_name)
        proj_seen.add(proj_name)
    # Previously we scanned for *.fastq.gz files, but it's more sensible to look for an explicit
    # list of projects. The projects don't get listed in Stats.json, so go back to sample_summary.yml
    # directly. This allows us to proceed even when no files were produced (ie. all the barcodes are wrong)
    try:
        with open( os.path.join( output_dir, "seqdata/pipeline" , "sample_summary.yml" ) ) as sfh:
            summary = yaml.safe_load(sfh)
        for proj in summary['ProjectInfo']:
            add_project(proj)
        # Now we can't add any new projects.
        def add_project(proj_name):
            assert proj_name in proj_seen
    except FileNotFoundError:
        log("Failed to read seqdata/pipeline/sample_summary.yml. Proceeding anyway.")
    # Some funny-business with UMI reads. These come out as read 2 but we actually want to rename them
    # to _UMI and rename the _3 read as _2. For this reason, gather up the file list first.
    afile = namedtuple("afile", "samplename lane readnumber project pool_and_library".split())
    all_fastq = set()
    afile_to_filename = dict()
    def translate_read_number(f, set_of_f):
        # Decide the read label for the output filename: read 2 is really
        # the UMI read whenever a read 3 exists for the same sample/lane,
        # in which case read 3 is relabelled as read 2.
        if f.readnumber == "2":
            # If we are dealing with UMI's we'll see a corresponding read3
            if f._replace(readnumber="3") in set_of_f:
                return "UMI"
        elif f.readnumber == "3":
            assert f._replace(readnumber="2") in set_of_f
            return "2"
        return f.readnumber
    # Notwithstanding the list of projects obtained by the summary, look for fastq.gz files in all
    # locations.
    # Either we have a list of projects and will find corresponding fastq, or else we have no list and
    # will make it up as we go along.
    for fastq_file in glob(os.path.join( output_dir, "demultiplexing/lane*" , "*/*/*.fastq.gz" )):
        #os.path.split is unhelpful here. Just do it the obvious way.
        # something like: 10528, 10528EJ0019L01, 10528EJpool03_S19_L005_R1_001.fastq.gz
        lane_dir, project, pool_and_library, filename = fastq_file.split('/')[-4:]
        #Note the project as one we've processed.
        add_project(project)
        # get information from the filename
        re_match = re.match( r'(.*)_(S[0-9]+)_L00(\d)_R(\d)_\d+.fastq.gz', filename, re.I)
        if not re_match:
            log("# skipping (regex mismatch) %s" % fastq_file)
            continue
        samplename = re_match.group(1) # e.g.: We ignore this!
        lane = re_match.group(3)       # e.g.: L00(5)
        readnumber = re_match.group(4) # e.g.: R(1)
        # Check lane matches the directory name
        if not lane_dir == 'lane{}'.format(lane):
            log("# skipping (lane mismatch) %s" % fastq_file)
            continue
        # Add this to the collection
        thisfile = afile( samplename = samplename,
                          lane = lane,
                          readnumber = readnumber,
                          project = project,
                          pool_and_library = pool_and_library )
        all_fastq.add(thisfile)
        afile_to_filename[thisfile] = fastq_file
    # Now go again for files not in a subdirectory (if Sample_Name was blank)
    # (apologies for the copy-paste)
    for fastq_file in glob(os.path.join( output_dir, "demultiplexing/lane*" , "*/*.fastq.gz" )):
        #os.path.split is unhelpful here. Just do it the obvious way.
        # something like: 10528, 10528EJpool03__10528EJ0019L01_S19_L005_R1_001.fastq.gz
        lane_dir, project, filename = fastq_file.split('/')[-3:]
        #Note the project as one we've processed.
        add_project(project)
        # get information from the filename
        # Note this ignores index reads.
        re_match = re.match( r'(.*)_(S[0-9]+)_L00(\d)_R(\d)_\d+.fastq.gz', filename, re.I)
        if not re_match:
            log("# skipping (regex mismatch) %s" % fastq_file)
            continue
        pool_and_library = re_match.group(1) # e.g.: 10528EJpool03__10528EJ0019L01
        lane = re_match.group(3)             # e.g.: L00(5)
        readnumber = re_match.group(4)       # e.g.: R(1)
        # Check lane matches the directory name
        if not lane_dir == 'lane{}'.format(lane):
            log("# skipping (lane mismatch) %s" % fastq_file)
            continue
        # Add this to the collection
        thisfile = afile( samplename = '',
                          lane = lane,
                          readnumber = readnumber,
                          project = project,
                          pool_and_library = pool_and_library )
        all_fastq.add(thisfile)
        afile_to_filename[thisfile] = fastq_file
    for f in all_fastq:
        fastq_file = afile_to_filename[f]
        readnumber = translate_read_number(f, all_fastq)
        # split out library and pool
        try:
            pool, library = f.pool_and_library.split('__')
        except ValueError:
            #log("# skipping (no pool__library) %s" % fastq_file)
            #continue
            # Decided be a little less strict here. This is also needed for PhiX
            pool = 'NoPool'
            # BUG FIX: this previously read the bare name 'pool_and_library',
            # a variable leaked from the glob loops above (the value for the
            # LAST file scanned), not the field of the file being renamed.
            library = f.pool_and_library
        new_filename = "{runid}_{f.lane}_{library}_{readnumber}.fastq.gz".format(**locals())
        new_filename_relative = os.path.join ( f.project, pool, new_filename )
        new_filename_absolute = os.path.join ( output_dir, new_filename_relative )
        #Make the directory to put it in
        os.makedirs(os.path.dirname(new_filename_absolute), exist_ok=True)
        #Paranoia. Rather than checking if the file exists, create it exclusively.
        #That way, no possible race condition that can cause one file to be renamed over
        #another file (ignoring remote NFS race conditions).
        try:
            log( "mv %s %s" % ('/'.join(fastq_file.split('/')[-4:]), new_filename_relative) )
            with open(new_filename_absolute, 'x') as tmp_fd:
                os.replace(fastq_file, new_filename_absolute)
        except FileExistsError:
            log("# FileExistsError renaming %s" % new_filename_relative)
            raise
    # Now deal with the undetermined files.
    undet_fastq = set()
    for undet_file_absolute in glob(os.path.join( output_dir, "demultiplexing/lane*", "[Uu]ndetermined_*" )):
        lane_dir, filename = undet_file_absolute.split('/')[-2:]
        # eg. Undetermined_S0_L004_R1_001.fastq.gz
        re_match = re.match( r'undetermined_(.*)_L00(\d)_R(\d)_\d+.fastq.gz', filename, re.I)
        if not re_match:
            # BUG FIX: this used to log 'fastq_file', a stale (or unbound)
            # variable left over from the loops above, not the file at hand.
            log("# skipping %s" % undet_file_absolute)
            continue
        lane = re_match.group(2)
        readnumber = re_match.group(3)
        # Check lane matches the directory name
        if not lane_dir == 'lane{}'.format(lane):
            # BUG FIX: as above, log the file actually being skipped.
            log("# skipping (lane mismatch) %s" % undet_file_absolute)
            continue
        # Add this to the collection
        thisfile = afile( samplename = 'undetermined',
                          lane = lane,
                          readnumber = readnumber,
                          project = '',
                          pool_and_library = '' )
        undet_fastq.add(thisfile)
        afile_to_filename[thisfile] = undet_file_absolute
    # And process the set we just collected
    for f in undet_fastq:
        fastq_file = afile_to_filename[f]
        readnumber = translate_read_number(f, undet_fastq)
        # eg. 160811_D00261_0355_BC9DA7ANXX_4_unassigned_1.fastq.gz
        new_filename = "{runid}_{f.lane}_unassigned_{readnumber}.fastq.gz".format(**locals())
        new_filename_absolute = os.path.join ( output_dir, new_filename )
        #See comment above
        try:
            # BUG FIX: the old-name half of this log line used the leaked
            # loop variable 'filename' (the last file collected above) and
            # omitted the lane directory; derive it from fastq_file instead.
            log( "mv %s %s" % ( '/'.join(fastq_file.split('/')[-3:]), new_filename) )
            with open(new_filename_absolute, 'x') as tmp_fd:
                os.rename(fastq_file, new_filename_absolute)
        except FileExistsError:
            log("# FileExistsError renaming %s" % new_filename)
            raise
    # Cleanup empty project directories (as per Cleanup.py) then warn if any dirs
    # remain (or, if fact, that's an error).
    for lane_dir in glob(os.path.join(output_dir, "demultiplexing", "lane*")):
        for proj in list(proj_seen):
            for root, dirs, files in os.walk(
                    os.path.join(lane_dir, proj),
                    topdown=False ):
                try:
                    os.rmdir(root)
                    log("rmdir '%s'" % root)
                except Exception:
                    # Assume it was non-empty.
                    ERRORS.add("Failed to remove all project directories from demultiplexing area.")
                    log("# could not remove dir '%s'" % root)
                    # And we cannot say the project is ready.
                    # TODO - Should I add it to pending??
                    proj_seen.discard(proj)
    # Finally return the projects processed
    return proj_seen
# Script entry point: echo the command line for provenance, run the
# post-processing, and exit non-zero if any errors were collected in ERRORS.
if __name__ == '__main__':
    print("Running: " + ' '.join(sys.argv))
    main(*sys.argv[1:])
    if ERRORS: exit(1)
| 489 | 0 | 114 |
a38636dd78547775e23ac4f9750df3200cd8a98d | 2,268 | py | Python | vectorai/models/deployed/image.py | boba-and-beer/vectorai | 5244968e4a3622f6c536e96e1fa25719634e5b45 | [
"Apache-2.0"
] | 255 | 2020-09-30T12:32:20.000Z | 2022-03-19T16:12:35.000Z | vectorai/models/deployed/image.py | boba-and-beer/vectorai | 5244968e4a3622f6c536e96e1fa25719634e5b45 | [
"Apache-2.0"
] | 20 | 2020-10-01T06:14:35.000Z | 2021-04-12T07:22:57.000Z | vectorai/models/deployed/image.py | boba-and-beer/vectorai | 5244968e4a3622f6c536e96e1fa25719634e5b45 | [
"Apache-2.0"
] | 33 | 2020-10-01T20:52:39.000Z | 2022-03-18T07:17:25.000Z | import io
import base64
import requests
from .base import ViDeployedModel
from typing import List
| 27 | 71 | 0.533069 | import io
import base64
import requests
from .base import ViDeployedModel
from typing import List
class ViImage2Vec(ViDeployedModel):
    """Client for the deployed image-to-vector encoding API.

    Sends image URLs to the remote /collection/encode_image endpoints and
    returns the decoded JSON response.
    """
    def encode(self, image):
        """Convert a single image (given by URL) to a vector via the API."""
        return requests.get(
            url="{}/collection/encode_image".format(self.url),
            params={
                "username": self.username,
                "api_key": self.api_key,
                "collection_name": self.collection_name,
                "image_url": image,
            },
        ).json()
    def bulk_encode(self, images: List[str]):
        """
        Bulk convert images (given by URL) to vectors
        """
        return requests.get(
            url="{}/collection/bulk_encode_image".format(self.url),
            params={
                "username": self.username,
                "api_key": self.api_key,
                "collection_name": self.collection_name,
                "image_urls": images,
            },
        ).json()
    @property
    def __name__(self):
        # Default model name when none was set.  NOTE(review): assumes the
        # ViDeployedModel base class initialises self._name -- confirm there.
        if self._name is None:
            return "vectorai_image"
        return self._name
    @__name__.setter
    def __name__(self, value):
        self._name = value
class ViImageArray2Vec(ViDeployedModel):
    """Encode a list of images into a single combined vector.

    Each image URL is encoded remotely via the bulk endpoint and the
    resulting vectors are combined with *vector_operation* (e.g. "mean").
    """
    def __init__(
        self,
        username,
        api_key,
        url=None,
        collection_name="base",
        vector_operation: str = "mean",
    ):
        self.username = username
        self.api_key = api_key
        if url:
            self.url = url
        else:
            self.url = "https://api.vctr.ai"
        self.collection_name = collection_name
        # BUG FIX: vector_operation was previously accepted but never
        # stored, so encode() (which reads self.vector_operation) silently
        # ignored the caller's choice.
        self.vector_operation = vector_operation
        # NOTE(review): __init__ does not call super().__init__(); confirm
        # that ViDeployedModel does not require it (e.g. to set self._name).
    def encode(self, images):
        """Encode *images* (list of URLs) and reduce them to one vector."""
        return self._vector_operation(
            requests.get(
                url="{}/collection/bulk_encode_image".format(self.url),
                params={
                    "username": self.username,
                    "api_key": self.api_key,
                    "collection_name": self.collection_name,
                    "image_urls": images,
                },
            ).json(),
            vector_operation=self.vector_operation,
        )
    @property
    def __name__(self):
        if self._name is None:
            return "vectorai_image_array"
        return self._name
    @__name__.setter
    def __name__(self, value):
        self._name = value
| 1,392 | 731 | 46 |
299d61d1196dddc6c524858100a86c524836b0be | 2,599 | py | Python | scripts/automated-scripts/kit/scrapper.py | ksripathi/ui-1.0-toolkit | b32cd482b455ccfb0581ba3cee48108a02f7201c | [
"MIT"
] | null | null | null | scripts/automated-scripts/kit/scrapper.py | ksripathi/ui-1.0-toolkit | b32cd482b455ccfb0581ba3cee48108a02f7201c | [
"MIT"
] | null | null | null | scripts/automated-scripts/kit/scrapper.py | ksripathi/ui-1.0-toolkit | b32cd482b455ccfb0581ba3cee48108a02f7201c | [
"MIT"
] | null | null | null | #!/usr/bin/python
import re
import os
import sys
from bs4 import BeautifulSoup
# Script entry point.  NOTE(review): Python 2 syntax ('except Exception, e'
# and the print statement) -- this script will not run under Python 3 as-is.
if __name__ == '__main__':
    try:
        labContent = driveContentScrappers()
        serializeLabContent(labContent)
    except Exception, e:
        print 'Exception! ', e
        #raise
| 32.4875 | 120 | 0.734898 | #!/usr/bin/python
import re
import os
import sys
from bs4 import BeautifulSoup
def getIndexDotHtmlFiles():
    """List files in the current directory whose names start 'index.htm'.

    The pattern 'index\\.html*' is matched at the start of the name only,
    so 'index.htm', 'index.html' and any name beginning with 'index.htm'
    all qualify.
    """
    index_re = re.compile(r'index\.html*')
    return [fname for fname in os.listdir('.') if index_re.match(fname)]
def contentScrapper(fname):
    """Extract (sectionName, sectionContent) from one downloaded lab page.

    The section name is taken from the '(Name) :' fragment of the page
    title; the content is the first 'divContent' div, falling back to
    'divQuiz', else ''.  NOTE(review): assumes the page has a table with
    id 'tabtable' and a title of the form '... (Section Name) : ...' --
    confirm against the scraped site's layout.  Also note sectionNumber
    is computed below but never used or returned.
    """
    #print 'I am in contentScrapper', fname
    f = open(fname, "r")
    srcHtml = f.read()
    f.close()
    regexSectionNameText = r'\([a-zA-Z ]*\) :'
    regexSectionName = r'[a-zA-Z ]+'
    regexCnt = r'cnt=[0-9]+'
    experimentContent = BeautifulSoup(srcHtml)
    sectionNameTable = str(experimentContent.find_all('table', id='tabtable')[0])
    # Position of this file's 'cnt=N' token within the tab table gives a
    # 1-based section number (currently unused).
    sectionNumber = re.findall(regexCnt, fname)[0]
    sectionNumber = re.findall(regexCnt, sectionNameTable).index(sectionNumber) + 1
    sectionName = re.findall(regexSectionName, str(re.findall(regexSectionNameText, experimentContent.title.string)[0]))[0]
    sectionContent = experimentContent.find_all('div', attrs={'class': 'divContent'})
    if len(sectionContent) == 0:
        # Quiz pages keep their content in a different div.
        sectionContent = experimentContent.find_all('div', attrs={'class': 'divQuiz'})
    if (len(sectionContent) != 0):
        sectionContent = sectionContent[0]
    else :
        sectionContent = ''
    return (sectionName, sectionContent)
def driveContentScrappers():
    """Scrape every index*.html file in the current directory.

    Returns map() over contentScrapper -- a list of (sectionName,
    sectionContent) pairs under Python 2, which this script targets.
    """
    indexDotHtmlFiles = getIndexDotHtmlFiles()
    return map(contentScrapper, indexDotHtmlFiles)
def serializeLabContent(labContent):
    """Render the scraped sections into content.html using template.html.

    For each (sectionName, sectionContent) pair a <section> element with
    icon/heading/content divs is appended under the template's
    'experiment-article-sections' container, numbered from 1.
    NOTE(review): the output is written with str.encode('utf-8') to a
    file opened in text mode -- this only works under Python 2 (the rest
    of this script also uses Python 2 syntax).
    """
    #print labContent
    f = open('template.html', "r")
    labHtml = f.read()
    f.close()
    labTemplate = BeautifulSoup(labHtml)
    articleSection = labTemplate.find_all('div', id="experiment-article-sections")[0]
    sectionNumber = 1
    for sectionName,sectionContent in labContent:
        # <section id="experiment-article-section-N"> wrapper per section.
        sectionTag = labTemplate.new_tag('section', id="experiment-article-section-"+str(sectionNumber))
        articleSection.append(sectionTag)
        iconTag = labTemplate.new_tag('div', id="experiment-article-section-"+str(sectionNumber)+"-icon")
        iconTag['class']='icon'
        sectionTag.append(iconTag)
        headingTag = labTemplate.new_tag('div', id="experiment-article-section-"+str(sectionNumber)+"-heading")
        headingTag['class']='heading'
        headingTag.append(sectionName)
        sectionTag.append(headingTag)
        contentTag = labTemplate.new_tag('div', id="experiment-article-section-"+str(sectionNumber)+"-content")
        contentTag['class']='content'
        contentTag.append(sectionContent)
        sectionTag.append(contentTag)
        sectionNumber +=1
    f = open('content.html', "w+")
    labTemplate = labTemplate.prettify()
    f.write(labTemplate.encode('utf-8'))
    f.close()
# Script entry point.  NOTE(review): Python 2 syntax ('except Exception, e'
# and the print statement) -- this script will not run under Python 3 as-is.
if __name__ == '__main__':
    try:
        labContent = driveContentScrappers()
        serializeLabContent(labContent)
    except Exception, e:
        print 'Exception! ', e
        #raise
| 2,262 | 0 | 93 |
759d74d5bfe408f60d4fce2cff0968d98ac2f59c | 1,508 | py | Python | test/utils/command_to_test.py | gantzgraf/vape | f939cb527d72d852cb0919a57332110c15c5fd4a | [
"MIT"
] | 4 | 2020-03-25T06:09:39.000Z | 2021-03-23T11:22:00.000Z | test/utils/command_to_test.py | gantzgraf/vape | f939cb527d72d852cb0919a57332110c15c5fd4a | [
"MIT"
] | 1 | 2020-10-02T14:50:30.000Z | 2020-10-12T15:24:24.000Z | test/utils/command_to_test.py | gantzgraf/vape | f939cb527d72d852cb0919a57332110c15c5fd4a | [
"MIT"
] | 1 | 2021-02-20T11:32:34.000Z | 2021-02-20T11:32:34.000Z | #!/usr/bin/env python3
import sys
import re
arg_re = re.compile(r'--\S+')
# Script entry point: requires exactly one argument, the tests.sh-style
# script to translate into pytest test functions.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.exit("Usage: {} tests.sh\n".format(sys.argv[0]))
    main(sys.argv[1])
| 28.45283 | 67 | 0.408488 | #!/usr/bin/env python3
import sys
import re
arg_re = re.compile(r'--\S+')
def output_args(args, func):
print('''
def {}():
output = get_tmp_out()
test_args = dict('''.format(func))
for x in args:
print(" {}={},".format(x[0].replace("-", ""), x[1]))
print(''' output=output,
)
results, expected = run_test(test_args, output,
sys._getframe().f_code.co_name)
assert_equal(results, expected)
os.remove(output)
''')
def main(f):
    """Read a shell-style test script and emit one pytest function per line.

    Each non-blank line is scanned for '--flag value' pairs; a flag
    followed by another flag or by '|' is recorded as a boolean switch.
    The line's final token -- minus any directory part and the '.txt'
    suffix -- names the generated test.  Output goes via output_args().
    """
    with open(f, 'rt') as infile:
        for line in infile:
            if line.rstrip() == '':
                continue  # skip blank lines
            tokens = line.split()
            parsed = []
            # The last token is the expected-output path, so it is never
            # scanned as a flag itself.
            for pos in range(len(tokens) - 1):
                if not arg_re.match(tokens[pos]):
                    continue
                nxt = tokens[pos + 1]
                if arg_re.match(nxt) or nxt == '|':
                    # Flag with no value: record a boolean switch.
                    parsed.append((tokens[pos], True))
                    continue
                # Keep numeric literals as numbers; quote everything else.
                try:
                    value = int(nxt)
                except ValueError:
                    try:
                        value = float(nxt)
                    except ValueError:
                        value = '"{}"'.format(nxt)
                parsed.append((tokens[pos], value))
            func = tokens[-1].split('/')[-1].replace('.txt', '')
            output_args(parsed, func)
# Script entry point: requires exactly one argument, the tests.sh-style
# script to translate into pytest test functions.
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.exit("Usage: {} tests.sh\n".format(sys.argv[0]))
    main(sys.argv[1])
| 1,245 | 0 | 46 |
0ffbe179ad5b73d75778abd1eb219fd40c357e69 | 2,367 | py | Python | stochastic_first_order/svrg.py | Hintonthu/opt_methods | e711ca708479c6fc99b7cad8fa2a078bd8d48cfd | [
"MIT"
] | 1 | 2020-07-17T08:46:18.000Z | 2020-07-17T08:46:18.000Z | stochastic_first_order/svrg.py | Hintonthu/opt_methods | e711ca708479c6fc99b7cad8fa2a078bd8d48cfd | [
"MIT"
] | null | null | null | stochastic_first_order/svrg.py | Hintonthu/opt_methods | e711ca708479c6fc99b7cad8fa2a078bd8d48cfd | [
"MIT"
] | null | null | null | import numpy as np
from optimizer import StochasticOptimizer
class Svrg(StochasticOptimizer):
"""
Stochastic variance-reduced gradient descent with constant stepsize.
Reference:
https://papers.nips.cc/paper/4937-accelerating-stochastic-gradient-descent-using-predictive-variance-reduction.pdf
Arguments:
lr (float, optional): an estimate of the inverse smoothness constant
"""
| 39.45 | 118 | 0.6109 | import numpy as np
from optimizer import StochasticOptimizer
class Svrg(StochasticOptimizer):
    """
    Stochastic variance-reduced gradient descent with constant stepsize.
    Reference:
        https://papers.nips.cc/paper/4937-accelerating-stochastic-gradient-descent-using-predictive-variance-reduction.pdf
    Arguments:
        lr (float, optional): an estimate of the inverse smoothness constant;
            if None, set in init_run() from the loss's batch smoothness
        batch_size (int, optional): minibatch size for stochastic gradients
        avoid_cache_miss (bool, optional): sample one contiguous (wrapping)
            index window instead of independent random indices
        loopless (bool, optional): if True, refresh the reference point with
            probability restart_prob at each step; otherwise use fixed
            inner loops of loop_len steps
        loop_len (int, optional): inner-loop length for the loopy variant;
            defaults to loss.n // batch_size
        restart_prob (float, optional): restart probability for the loopless
            variant; defaults to batch_size / loss.n
    """
    def __init__(self, lr=None, batch_size=1, avoid_cache_miss=False, loopless=True,
                 loop_len=None, restart_prob=None, *args, **kwargs):
        super(Svrg, self).__init__(*args, **kwargs)
        self.lr = lr
        self.batch_size = batch_size
        self.avoid_cache_miss = avoid_cache_miss
        self.loopless = loopless
        self.loop_len = loop_len
        self.restart_prob = restart_prob
        # Defaults make both variants do roughly one pass over the data per
        # full-gradient (reference point) computation.
        if loopless and restart_prob is None:
            self.restart_prob = batch_size / self.loss.n
        elif not loopless and loop_len is None:
            self.loop_len = self.loss.n // batch_size
    def step(self):
        """Perform one SVRG update of self.x in place."""
        # Decide whether to start a new loop, i.e. recompute the full
        # gradient at a fresh reference point x_old.
        new_loop = self.loopless and np.random.uniform() < self.restart_prob
        if not self.loopless and self.loop_it == self.loop_len:
            new_loop = True
        if new_loop or self.it == 0:
            # Reference-point refresh: on this step the update direction
            # is the full gradient itself.  (self.it is the step counter
            # maintained by the StochasticOptimizer base class.)
            self.x_old = self.x.copy()
            self.full_grad_old = self.loss.gradient(self.x_old)
            self.vr_grad = self.full_grad_old.copy()
            if not self.loopless:
                self.loop_it = 0
                self.loops += 1
        else:
            if self.avoid_cache_miss:
                # Contiguous window of indices (wrapping around n) --
                # cheaper memory access than an independent random sample.
                i = np.random.choice(self.loss.n)
                idx = np.arange(i, i + self.batch_size)
                idx %= self.loss.n
            else:
                idx = np.random.choice(self.loss.n, size=self.batch_size)
            # Variance-reduced estimator:
            #   g = grad_i(x) - grad_i(x_old) + full_grad(x_old)
            stoch_grad = self.loss.stochastic_gradient(self.x, idx=idx)
            stoch_grad_old = self.loss.stochastic_gradient(self.x_old, idx=idx)
            self.vr_grad = stoch_grad - stoch_grad_old + self.full_grad_old
        self.x -= self.lr * self.vr_grad
        self.loop_it += 1
    def init_run(self, *args, **kwargs):
        """Initialise loop counters and, if needed, the default stepsize."""
        super(Svrg, self).init_run(*args, **kwargs)
        self.loop_it = 0
        self.loops = 0
        if self.lr is None:
            # 1/(2L), where L is the batch smoothness constant of the loss.
            self.lr = 0.5 / self.loss.batch_smoothness(self.batch_size)
| 1,858 | 0 | 92 |
966179a164e51151330f8b14be5ec6adc1c899a7 | 3,398 | py | Python | coax/utils/_misc_test.py | sleepy-owl/coax | 37c3e667b81537768beb25bb59d0f05124624128 | [
"MIT"
] | null | null | null | coax/utils/_misc_test.py | sleepy-owl/coax | 37c3e667b81537768beb25bb59d0f05124624128 | [
"MIT"
] | null | null | null | coax/utils/_misc_test.py | sleepy-owl/coax | 37c3e667b81537768beb25bb59d0f05124624128 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------------------------ #
# MIT License #
# #
# Copyright (c) 2020, Microsoft Corporation #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or #
# substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ------------------------------------------------------------------------------------------------ #
import os
import tempfile
from ..utils import jit
from ._misc import dump, dumps, load, loads
| 41.950617 | 100 | 0.472631 | # ------------------------------------------------------------------------------------------------ #
# MIT License #
# #
# Copyright (c) 2020, Microsoft Corporation #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or #
# substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ------------------------------------------------------------------------------------------------ #
import os
import tempfile
from ..utils import jit
from ._misc import dump, dumps, load, loads
def test_dump_load():
    """Round-trip objects through dump/load files, checking reference semantics."""
    with tempfile.TemporaryDirectory() as tmpdir:
        inner = [13]
        outer = {'a': inner}
        # Dumped together, the shared reference survives the round trip:
        # a mutation made through the dict is visible through the list.
        both_path = os.path.join(tmpdir, 'ab.pkl.lz4')
        dump((inner, outer), both_path)
        inner_rt, outer_rt = load(both_path)
        outer_rt['a'].append(7)
        assert outer_rt['a'] == [13, 7]
        assert inner_rt == [13, 7]
        # Dumped to separate files, each object is serialised on its own,
        # so the loaded copies no longer share state.
        dump(inner, os.path.join(tmpdir, 'a.pkl.lz4'))
        dump(outer, os.path.join(tmpdir, 'b.pkl.lz4'))
        inner_rt = load(os.path.join(tmpdir, 'a.pkl.lz4'))
        outer_rt = load(os.path.join(tmpdir, 'b.pkl.lz4'))
        outer_rt['a'].append(7)
        assert outer_rt['a'] == [13, 7]
        assert inner_rt == [13]
def test_dumps_loads():
    """Round-trip objects through dumps/loads strings, checking reference semantics."""
    shared = [13]
    holder = {'a': shared}
    # Serialised together, the two objects keep their shared reference:
    # appending through the dict is visible through the list as well.
    shared_rt, holder_rt = loads(dumps((shared, holder)))
    holder_rt['a'].append(7)
    assert holder_rt['a'] == [13, 7]
    assert shared_rt == [13, 7]
    # Serialised separately, each blob embeds its own copy, so the loaded
    # objects no longer alias one another.
    blob_list = dumps(shared)
    blob_dict = dumps(holder)
    shared_rt = loads(blob_list)
    holder_rt = loads(blob_dict)
    holder_rt['a'].append(7)
    assert holder_rt['a'] == [13, 7]
    assert shared_rt == [13]
def test_dumps_loads_jitted_function():
    """A jit-compiled function must survive a dumps/loads round trip."""
    @jit
    def times_thirteen(x):
        return 13 * x
    restored = loads(dumps(times_thirteen))
    assert restored(11) == times_thirteen(11) == 143
| 1,210 | 0 | 69 |
e38deed17782b0348e10afe08bb09fd3c3b2a515 | 1,082 | py | Python | web_scraping/utils.py | wangfan950309/web_scraping | 3896dc37b5b60a64fda6ef68c1b8155d9255ccca | [
"MIT"
] | 1 | 2019-07-05T04:26:58.000Z | 2019-07-05T04:26:58.000Z | web_scraping/utils.py | wangfan950309/web_scraping | 3896dc37b5b60a64fda6ef68c1b8155d9255ccca | [
"MIT"
] | null | null | null | web_scraping/utils.py | wangfan950309/web_scraping | 3896dc37b5b60a64fda6ef68c1b8155d9255ccca | [
"MIT"
] | 1 | 2019-07-07T02:22:49.000Z | 2019-07-07T02:22:49.000Z | import pandas as pd
import math
import numpy as np
from web_scraping.scraping import *
| 30.914286 | 76 | 0.540665 | import pandas as pd
import math
import numpy as np
from web_scraping.scraping import *
def search_algorithm(keywords):
search_key = keywords
keys = search_key.split()
key_dic = dict()
df = pd.read_csv("%s/data/Job_search.csv" % main_path(), index_col=None)
search_field = df['Job_Name'].tolist()
for key in keys:
idf_counter = 0
for field in search_field:
if key in field:
idf_counter += 1
key_dic[key] = math.log(len(df.index)/1+idf_counter)
df['TF-IDF'] = 0
for key in keys:
tf = []
for field in search_field:
counter = 0
job = field.split()
for i in job:
if key.lower() == i.lower():
counter += 1
tf.append(counter/len(job))
df[key] = np.array(tf)
df[key] = df[key]*key_dic[key] ##### TF * IDF = TF-IDF
df['TF-IDF'] = df['TF-IDF'] + df[key]
df = df.sort_values(by=['TF-IDF'], ascending=False)
df = df.loc[df['TF-IDF'] != 0]
df = df.iloc[:, :4]
return df
| 972 | 0 | 23 |
dec8d2fcabc5d2693c110d790d4c8a005a3bc037 | 148 | py | Python | main/admin.py | Unviray/Internet-consumption | b6b7a7d27c20f3a0dad24f801be1c19f2f685bb2 | [
"MIT"
] | null | null | null | main/admin.py | Unviray/Internet-consumption | b6b7a7d27c20f3a0dad24f801be1c19f2f685bb2 | [
"MIT"
] | null | null | null | main/admin.py | Unviray/Internet-consumption | b6b7a7d27c20f3a0dad24f801be1c19f2f685bb2 | [
"MIT"
] | null | null | null | """
main.admin
==========
"""
from django.contrib import admin
from .models import InternetConsumption
admin.site.register(InternetConsumption)
| 12.333333 | 40 | 0.72973 | """
main.admin
==========
"""
from django.contrib import admin
from .models import InternetConsumption
admin.site.register(InternetConsumption)
| 0 | 0 | 0 |
67d58d168e90ba1d0a25d1919d7162e9097f1c6e | 9,043 | py | Python | virt/ansible-2.3.0/lib/python2.7/site-packages/ansible/modules/cloud/openstack/os_user.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | null | null | null | virt/ansible-2.3.0/lib/python2.7/site-packages/ansible/modules/cloud/openstack/os_user.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | null | null | null | virt/ansible-2.3.0/lib/python2.7/site-packages/ansible/modules/cloud/openstack/os_user.py | lakhlaifi/RedHat-Ansible | 27c5077cced9d416081fcd5d69ea44bca0317fa4 | [
"Apache-2.0"
] | 1 | 2020-02-13T14:24:57.000Z | 2020-02-13T14:24:57.000Z | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_user
short_description: Manage OpenStack Identity Users
extends_documentation_fragment: openstack
author: David Shrewsbury
version_added: "2.0"
description:
- Manage OpenStack Identity users. Users can be created,
updated or deleted using this module. A user will be updated
if I(name) matches an existing user and I(state) is present.
The value for I(name) cannot be updated without deleting and
re-creating the user.
options:
name:
description:
- Username for the user
required: true
password:
description:
- Password for the user
required: false
default: None
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.3"
description:
- C(always) will attempt to update password. C(on_create) will only
set the password for newly created users.
email:
description:
- Email address for the user
required: false
default: None
default_project:
description:
- Project name or ID that the user should be associated with by default
required: false
default: None
domain:
description:
- Domain to create the user in if the cloud supports domains
required: false
default: None
enabled:
description:
- Is the user enabled
required: false
default: True
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a user
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
email: demo@example.com
domain: default
default_project: demo
# Delete a user
- os_user:
cloud: mycloud
state: absent
name: demouser
# Create a user but don't update password if user exists
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
update_password: on_create
email: demo@example.com
domain: default
default_project: demo
'''
RETURN = '''
user:
description: Dictionary describing the user.
returned: On success when I(state) is 'present'
type: dictionary
contains:
default_project_id:
description: User default project ID. Only present with Keystone >= v3.
type: string
sample: "4427115787be45f08f0ec22a03bfc735"
domain_id:
description: User domain ID. Only present with Keystone >= v3.
type: string
sample: "default"
email:
description: User email address
type: string
sample: "demo@example.com"
id:
description: User ID
type: string
sample: "f59382db809c43139982ca4189404650"
name:
description: User name
type: string
sample: "demouser"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| 31.729825 | 84 | 0.608095 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_user
short_description: Manage OpenStack Identity Users
extends_documentation_fragment: openstack
author: David Shrewsbury
version_added: "2.0"
description:
- Manage OpenStack Identity users. Users can be created,
updated or deleted using this module. A user will be updated
if I(name) matches an existing user and I(state) is present.
The value for I(name) cannot be updated without deleting and
re-creating the user.
options:
name:
description:
- Username for the user
required: true
password:
description:
- Password for the user
required: false
default: None
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.3"
description:
- C(always) will attempt to update password. C(on_create) will only
set the password for newly created users.
email:
description:
- Email address for the user
required: false
default: None
default_project:
description:
- Project name or ID that the user should be associated with by default
required: false
default: None
domain:
description:
- Domain to create the user in if the cloud supports domains
required: false
default: None
enabled:
description:
- Is the user enabled
required: false
default: True
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a user
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
email: demo@example.com
domain: default
default_project: demo
# Delete a user
- os_user:
cloud: mycloud
state: absent
name: demouser
# Create a user but don't update password if user exists
- os_user:
cloud: mycloud
state: present
name: demouser
password: secret
update_password: on_create
email: demo@example.com
domain: default
default_project: demo
'''
RETURN = '''
user:
description: Dictionary describing the user.
returned: On success when I(state) is 'present'
type: dictionary
contains:
default_project_id:
description: User default project ID. Only present with Keystone >= v3.
type: string
sample: "4427115787be45f08f0ec22a03bfc735"
domain_id:
description: User domain ID. Only present with Keystone >= v3.
type: string
sample: "default"
email:
description: User email address
type: string
sample: "demo@example.com"
id:
description: User ID
type: string
sample: "f59382db809c43139982ca4189404650"
name:
description: User name
type: string
sample: "demouser"
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _needs_update(params_dict, user):
for k, v in params_dict.items():
if k not in ('password', 'update_password') and user[k] != v:
return True
# We don't get password back in the user object, so assume any supplied
# password is a change.
if (params_dict['password'] is not None and
params_dict['update_password'] == 'always'):
return True
return False
def _get_domain_id(cloud, domain):
try:
# We assume admin is passing domain id
domain_id = cloud.get_domain(domain)['id']
except:
# If we fail, maybe admin is passing a domain name.
# Note that domains have unique names, just like id.
try:
domain_id = cloud.search_domains(filters={'name': domain})[0]['id']
except:
# Ok, let's hope the user is non-admin and passing a sane id
domain_id = domain
return domain_id
def _get_default_project_id(cloud, default_project):
project = cloud.get_project(default_project)
if not project:
module.fail_json(msg='Default project %s is not valid' % default_project)
return project['id']
def main():
    """Ansible entry point: create, update or delete an OpenStack user."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        password=dict(required=False, default=None, no_log=True),
        email=dict(required=False, default=None),
        default_project=dict(required=False, default=None),
        domain=dict(required=False, default=None),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
        update_password=dict(default='always', choices=['always',
                                                        'on_create']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    # pop() keeps the password out of module.params, which is later passed
    # wholesale to shade as cloud-connection kwargs.
    password = module.params.pop('password')
    email = module.params['email']
    default_project = module.params['default_project']
    domain = module.params['domain']
    enabled = module.params['enabled']
    state = module.params['state']
    update_password = module.params['update_password']

    try:
        cloud = shade.openstack_cloud(**module.params)
        user = cloud.get_user(name)

        domain_id = None
        if domain:
            # Domain lookups need an operator (admin) connection.
            opcloud = shade.operator_cloud(**module.params)
            domain_id = _get_domain_id(opcloud, domain)

        if state == 'present':
            # A password must be supplied whenever it may be set.
            if update_password in ('always', 'on_create'):
                if not password:
                    msg = ("update_password is %s but a password value is "
                           "missing") % update_password
                    module.fail_json(msg=msg)
            default_project_id = None
            if default_project:
                default_project_id = _get_default_project_id(cloud, default_project)
            if user is None:
                # No such user yet: create it.
                user = cloud.create_user(
                    name=name, password=password, email=email,
                    default_project=default_project_id, domain_id=domain_id,
                    enabled=enabled)
                changed = True
            else:
                # User exists: only update when something actually differs.
                params_dict = {'email': email, 'enabled': enabled,
                               'password': password,
                               'update_password': update_password}
                if domain_id is not None:
                    params_dict['domain_id'] = domain_id
                if default_project_id is not None:
                    params_dict['default_project_id'] = default_project_id
                if _needs_update(params_dict, user):
                    if update_password == 'always':
                        user = cloud.update_user(
                            user['id'], password=password, email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled)
                    else:
                        # 'on_create': never touch an existing password.
                        user = cloud.update_user(
                            user['id'], email=email,
                            default_project=default_project_id,
                            domain_id=domain_id, enabled=enabled)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, user=user)

        elif state == 'absent':
            if user is None:
                changed = False
            else:
                cloud.delete_user(user['id'])
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| 4,738 | 0 | 92 |
33c2d5b3fc9db8a4cc6fabb36d76ee63fc85b395 | 423 | py | Python | python-libraries/collections/re/re_option_verbose.py | nkhn37/python-tech-sample-source | e8aea7ed3d810494682b3c2dde952ddd0f7acf84 | [
"MIT"
] | null | null | null | python-libraries/collections/re/re_option_verbose.py | nkhn37/python-tech-sample-source | e8aea7ed3d810494682b3c2dde952ddd0f7acf84 | [
"MIT"
] | null | null | null | python-libraries/collections/re/re_option_verbose.py | nkhn37/python-tech-sample-source | e8aea7ed3d810494682b3c2dde952ddd0f7acf84 | [
"MIT"
] | null | null | null | """正規表現モジュール re の使い方
オプション:空行/コメントを付与して正規表現を見やすくする (VERBOSE)
[説明ページ]
https://tech.nkhn37.net/python-re-options/#VERBOSE
"""
import re
text = '私のメールアドレスは、user_01@test.comです。'
ptrn = re.compile(r'''([a-z0-9_.+-]+) #local
@ #delimiter
([a-z0-9][a-z0-9-]*[a-z0-9]*\.)+[a-z]{2,} #domain''',
re.VERBOSE)
if result := ptrn.search(text):
print(result.group())
| 22.263158 | 75 | 0.550827 | """正規表現モジュール re の使い方
オプション:空行/コメントを付与して正規表現を見やすくする (VERBOSE)
[説明ページ]
https://tech.nkhn37.net/python-re-options/#VERBOSE
"""
import re
text = '私のメールアドレスは、user_01@test.comです。'
ptrn = re.compile(r'''([a-z0-9_.+-]+) #local
@ #delimiter
([a-z0-9][a-z0-9-]*[a-z0-9]*\.)+[a-z]{2,} #domain''',
re.VERBOSE)
if result := ptrn.search(text):
print(result.group())
| 0 | 0 | 0 |
671300b770bf9c3369c6c23582656d5224123c9a | 25,035 | py | Python | molmod/importer/importer.py | pragermh/mol-mod | 2cf1ea6808c0142bb6c885313fec0b26a3a917d6 | [
"CC0-1.0"
] | null | null | null | molmod/importer/importer.py | pragermh/mol-mod | 2cf1ea6808c0142bb6c885313fec0b26a3a917d6 | [
"CC0-1.0"
] | 15 | 2020-10-13T11:53:08.000Z | 2021-06-08T08:34:49.000Z | molmod/importer/importer.py | pragermh/mol-mod | 2cf1ea6808c0142bb6c885313fec0b26a3a917d6 | [
"CC0-1.0"
] | 4 | 2020-11-20T10:40:26.000Z | 2022-02-10T09:00:58.000Z | #!/usr/bin/env python3
"""
The mol-mod data importer takes an Excel or Tar file stream, then rearranges
and inserts the data into the database. The script is executed inside a running
container using the import_excel.py wrapper.
"""
import hashlib
import json
import logging
import os
import re
import select
import sys
import tarfile
import tempfile
from collections import OrderedDict
from datetime import date
from io import BytesIO
from typing import List, Mapping, Optional
import pandas
import psycopg2
from psycopg2.extras import DictCursor
DEFAULT_MAPPING = os.path.join(os.path.dirname(__file__), 'data-mapping.json')
# Define pandas dict of sheets type. This is what's returned from read_excel()
PandasDict = Mapping[str, pandas.DataFrame]
def as_snake_case(text: str) -> str:
    """
    Converts CamelCase to snake_case.

    As a special case, this function converts `ID` to `_id` instead of `_i_d`.
    """
    pieces = []
    for index, char in enumerate(text):
        inner_upper = char.isupper() and index != 0
        # Keep "ID" together so e.g. eventID becomes event_id, not event_i_d.
        d_after_i = char == 'D' and text[index - 1] == 'I'
        if inner_upper and not d_after_i:
            pieces.append("_")
        pieces.append(char.lower())
    return "".join(pieces)
def connect_db(pass_file='/run/secrets/postgres_pass'):
    """
    Uses environment variables to set postgres connection settings, and
    creates a database connection. A simple query to list datasets is then
    used to verify the connection.

    Args:
        pass_file: Path of a file holding the database password
            (a docker secret by default).

    Returns:
        (connection, cursor) tuple; the cursor uses DictCursor so result
        rows can be accessed by column name.

    Exits the process (status 1) if the password file is missing or the
    connection / verification query fails.
    """
    try:
        with open(pass_file) as password:
            password = password.read()
    except FileNotFoundError:
        logging.error("Could not read postgres pwd from %s", pass_file)
        sys.exit(1)
    try:
        connection = psycopg2.connect(
            user=os.getenv('POSTGRES_USER', 'psql'),
            password=password,
            database=os.getenv('POSTGRES_DB', 'db'),
            host=os.getenv('POSTGRES_HOST', 'localhost'),
            port=os.getenv('POSTGRES_PORT', '5432')
        )
        logging.info("Connected to PostgreSQL database")
        cursor = connection.cursor(cursor_factory=DictCursor)
        # Cheap query to prove the connection actually works.
        cursor.execute("SELECT * FROM public.dataset;")
        logging.debug("Database connection verified")
    except psycopg2.OperationalError as err:
        logging.error("Could not connect to postgres database")
        logging.error(err)
        sys.exit(1)
    return connection, cursor
def get_base_query(table_mapping: dict):
    """
    Creates an SQL insert query base using the given `table_mapping`.

    Returns a tuple of the query prefix (``INSERT INTO table (cols)``) and
    an ordered mapping from source field name to quoted column name.  Note
    that the returned query is not complete: it has no VALUES clause.
    """
    field_map = OrderedDict(
        (field, f'"{settings.get("field", as_snake_case(field))}"')
        for field, settings in table_mapping.items()
        if field not in ['targetTable']
    )
    column_list = ", ".join(field_map.values())
    return f"INSERT INTO {table_mapping['targetTable']} ({column_list})", field_map
def format_value(value):
    """
    Formats `value` in a manner suitable for postgres insert queries.

    Strings and dates are single-quoted; embedded single quotes are doubled
    (the SQL escape form), so a value such as "O'Brien" no longer breaks
    the generated statement.  None becomes NULL; everything else is passed
    through unchanged.
    """
    if isinstance(value, (str, date)):
        # Double any single quotes: 'O''Brien' is how SQL escapes them.
        escaped = str(value).replace("'", "''")
        return f"'{escaped}'"
    if value is None:
        return 'NULL'
    return value
def format_values(data: pandas.DataFrame, mapping: dict,
                  start: int = 0, end: Optional[int] = 0) -> List[str]:
    """
    Formats the values in `data` according to the given `mapping` in a way
    that is suitable for database insert queries. Only rows from `start`
    (inclusive) to `end` (exclusive) will be used.

    Returns a list of "(v1, v2, ...)" strings, one per row.  (The return
    annotation previously claimed `str`, but a list has always been
    returned.)
    """
    values = []
    for i in range(start, end):
        row = [format_value(data[field][i]) for field in mapping]
        values += [f'({", ".join(map(str, row))})']
    return values
def insert_common(data: pandas.DataFrame, mapping: dict, db_cursor: DictCursor,
                  batch_size: int = 1000):
    """
    Inserts `data` into the database based on the given `mapping`,
    `batch_size` rows at a time.  Exits the process on any database error
    (nothing is committed by this function itself).
    """
    base_query, field_mapping = get_base_query(mapping)
    row_count = len(data.values)
    lower = 0
    while lower < row_count:
        upper = min(row_count, lower + batch_size)
        logging.info(" * inserting %s to %s", lower, upper)
        batch = format_values(data, field_mapping, lower, upper)
        statement = f"{base_query} VALUES {', '.join(batch)};"
        try:
            db_cursor.execute(statement)
        except psycopg2.Error as err:
            logging.error(err)
            logging.error("No data were imported.")
            sys.exit(1)
        lower = upper
def insert_dataset(data: pandas.DataFrame, mapping: dict,
                   db_cursor: DictCursor) -> int:
    """
    Inserts a single dataset into the database, and returns the database `pid`.
    """
    base_query, field_mapping = get_base_query(mapping['dataset'])
    # A data file must describe exactly one dataset.
    if len(data.values) != 1:
        logging.error("There must be exactly one dataset to insert")
        sys.exit(1)
    values = format_values(data, field_mapping, 0, 1)
    # RETURNING pid hands the generated primary key back so dependent
    # rows (events etc.) can reference it.
    query = f"{base_query} VALUES {', '.join(values)} RETURNING pid;"
    try:
        db_cursor.execute(query)
    except psycopg2.Error as err:
        logging.error(err)
        logging.error("No data were imported.")
        sys.exit(1)
    return db_cursor.fetchall()[0]['pid']
def insert_events(data: pandas.DataFrame, mapping: dict, db_cursor: DictCursor,
                  batch_size: int = 1000) -> pandas.DataFrame:
    """
    Inserts sampling events, returning the given dataframe with updated
    `pid`'s from the database.
    """
    base_query, field_mapping = get_base_query(mapping['event'])
    total = len(data.values)
    start = 0
    end = min(total, batch_size)
    # Database-generated primary keys, collected in insert order.
    pids = []
    while start < total:
        logging.info(" * inserting %s to %s", start, end)
        values = format_values(data, field_mapping, start, end)
        # RETURNING pid collects the generated keys for each batch.
        query = f"{base_query} VALUES {', '.join(values)} RETURNING pid;"
        try:
            db_cursor.execute(query)
            pids += [r['pid'] for r in db_cursor.fetchall()]
        except psycopg2.Error as err:
            logging.error(err)
            logging.error("No data were imported.")
            sys.exit(1)
        start = end
        end = min(total, end + batch_size)
    # assign pids to data for future joins
    return data.assign(pid=pids)
def insert_asvs(data: pandas.DataFrame, mapping: dict, db_cursor: DictCursor,
                batch_size: int = 1000) -> (pandas.DataFrame, int):
    """
    Inserts asv's into the database, returning the database `pid`'s. Unlike the
    other categories asv conflicts returns the id of the previously registered
    entry.

    Returns a tuple of (data with a new `pid` column, max pid before the
    insert) so callers can tell new ASVs apart from pre-existing ones.
    """
    base_query, field_mapping = get_base_query(mapping['asv'])
    total = len(data.values)
    start = 0
    end = min(total, batch_size)
    # get max asv_id before insert (this helps us figure out which asv's were
    # already in the database).
    db_cursor.execute("SELECT MAX(pid) FROM asv;")
    old_max_pid = db_cursor.fetchone()[0]
    pids = []
    while start < total:
        logging.info(" * inserting %s to %s", start, end)
        values = format_values(data, field_mapping, start, end)
        # The no-op DO UPDATE makes RETURNING yield the existing row's pid
        # on a sequence conflict instead of skipping the row entirely.
        query = f"{base_query} VALUES {', '.join(values)} " + \
                "ON CONFLICT (asv_sequence) DO UPDATE SET pid = asv.pid " + \
                "RETURNING pid;"
        try:
            db_cursor.execute(query)
            pids += [r['pid'] for r in db_cursor.fetchall()]
        except psycopg2.Error as err:
            logging.error(err)
            logging.error("No data were imported.")
            sys.exit(1)
        start = end
        end = min(total, end + batch_size)
    # assign pids to data for future joins
    # (MAX(pid) is None for an empty table, hence `or 0`)
    return data.assign(pid=pids), old_max_pid or 0
def read_data_file(data_file: str, sheets: List[str]):
    """
    Opens and reads the given `sheets` from `data_file`. `data_file` must be a
    valid excel or tar file.

    Returns a dict mapping sheet name to a pandas DataFrame.  Exits the
    process if the file format is unrecognized, or if a required sheet /
    archive member is missing or unreadable.
    """
    # Check input file format
    is_tar = tarfile.is_tarfile(data_file)
    if is_tar:
        tar = tarfile.open(data_file)
    else:
        # Probe with pandas to confirm the file is readable as Excel.
        try:
            pandas.read_excel(data_file)
        except (ValueError, KeyError):
            logging.error("Input neither recognized as tar nor as Excel.")
            sys.exit(1)
    data = {}
    # Read one sheet at the time, to catch any missing sheets
    for sheet in sheets:
        # Skip occurrences and asvs, as they are taken from asv-table sheet
        if sheet in ['asv', 'occurrence']:
            continue
        try:
            if is_tar:
                # Find correct file in tar archive
                content = None
                for member in tar:
                    # Ignore parent dir, if any
                    member_name = os.path.basename(member.name)
                    # Match the member's basename (minus extension) to the
                    # sheet name.
                    if member_name.split('.')[0] == sheet:
                        csv_file = tar.extractfile(member)
                        content = BytesIO(csv_file.read())
                        csv_file.close()
                if not content:
                    raise KeyError
                try:
                    data[sheet] = pandas.read_csv(content)
                except Exception:
                    logging.error("Input file '%s' could not be read. "
                                  "Please inpect file.", member.name)
                    sys.exit(1)
            else:
                data[sheet] = pandas.read_excel(data_file, sheet_name=sheet)
        except KeyError:
            logging.error("Input sheet '%s' not found. Aborting.", sheet)
            sys.exit(1)
    logging.info("%s file read", "Tar" if is_tar else "Excel")
    for sheet in data:
        # Drop empty rows and columns, if any
        data[sheet] = data[sheet].dropna(how='all')
        data[sheet] = data[sheet].drop(data[sheet].filter(regex="Unnamed"),
                                       axis='columns')
    # Drop 'domain' column if e.g. ampliseq has included that
    for sheet in ['asv-table', 'annotation']:
        data[sheet] = data[sheet].drop(columns=['domain'], errors='ignore')
    return data
def handle_dates(dates: pandas.Series):
    '''
    Removes time digits (e.g. 00:00:00) from (Excel) date / timestamp field,
    as they mess up validation. Does nothing if field is text / string.
    '''
    try:
        stripped = dates.dt.date
    except AttributeError:
        # Not a datetime-like series (e.g. plain text): leave untouched.
        stripped = dates
    return stripped
def run_import(data_file: str, mapping_file: str, batch_size: int = 1000,
               validate: bool = True, dry_run: bool = False):
    """
    Inserts the data from data_file into the database using the mapping_file.

    Args:
        data_file: Excel or tar archive with the input sheets.
        mapping_file: JSON file mapping sheet fields to database columns.
        batch_size: Max number of rows per INSERT statement.
        validate: Run regexp/alias validation before inserting.
        dry_run: Roll back instead of committing at the end.
    """
    logging.info("Connecting to database")
    connection, cursor = connect_db()
    logging.info("Loading mapping file")
    try:
        mapping = json.load(open(mapping_file))
    except json.decoder.JSONDecodeError as err:
        filename = os.path.basename(mapping_file)
        # Bug fix: `filename` was computed but the message printed a
        # hard-coded placeholder instead of it.
        logging.error(f'There is an error in {filename}: {err}')
        sys.exit(1)
    logging.info("Loading data file")
    data = read_data_file(data_file, list(mapping.keys()))
    #
    # Derive occurrence and asv 'sheets' from asv-table sheet.
    #
    # We do this already here, to include asv and occurrence fields subsequent
    # validation (which expects 'unpivoted' rows). This means, however,
    # that asv-table defaults (added in data-mapping.json) will have no effects
    # on occurrences or asvs.
    #
    try:
        # 'Unpivot' event columns into rows, keeping 'id_columns' as columns
        id_columns = ['asv_id_alias', 'DNA_sequence', 'associatedSequences',
                      'kingdom', 'phylum', 'class', 'order', 'family', 'genus',
                      'specificEpithet', 'infraspecificEpithet', 'otu']
        occurrences = data['asv-table'] \
            .melt(id_columns,
                  # Store event column header and values as:
                  var_name='event_id_alias',
                  value_name='organism_quantity')
    except KeyError:
        logging.error('Input files seem to not have been read properly. '
                      'Please, check dimensions (#rows, #cols) below:')
        for sheet in ['dataset', 'emof', 'mixs', 'asv-table', 'annotation']:
            logging.error(f'Sheet {sheet} has dimensions {data[sheet].shape}')
        logging.error('Excel files exported from R have caused this problem '
                      'before. Try opening and saving input in Excel, '
                      'or importing data as *.tar.gz instead.')
        sys.exit(1)
    # Remove rows with organism_quantity 0,
    # and reset index so that removed rows are no longer referenced
    # As we do this before validation, we need to catch potential TypeError
    try:
        occurrences = occurrences[occurrences.organism_quantity > 0]
    except TypeError:
        logging.error('Counts in asv-table include non-numeric values. '
                      'No data were imported.')
        sys.exit(1)
    else:
        occurrences.reset_index(inplace=True)
    # Store as 'sheet' in data object
    data['occurrence'] = occurrences
    # Also create asv 'sheet'
    data['asv'] = occurrences[['asv_id_alias', 'DNA_sequence']]
    # Make sure we have unique asv rows,
    # to avoid ON CONFLICT - DO UPDATE errors in insert_asvs
    data['asv'] = data['asv'].drop_duplicates()
    data['asv'].reset_index(inplace=True)
    # Check for field differences between data input and mapping
    logging.info("Checking fields")
    if not compare_fields(data, mapping):
        logging.error('No data were imported.')
        sys.exit(1)
    # Deal with Excel timestamps
    # Requires date fields to exist, so do not move ahead of field check!
    data['event']['eventDate'] = handle_dates(data['event']['eventDate'])
    data['annotation']['date_identified'] = \
        handle_dates(data['annotation']['date_identified'])
    if validate:
        logging.info("Validating input data")
        if not run_validation(data, mapping):
            logging.error("No data were imported.")
            sys.exit(1)
        if not compare_aliases(data):
            logging.error("No data were imported.")
            sys.exit(1)
    logging.info("Updating defaults")
    update_defaults(data, mapping)
    # Replace remaining missing values with None.
    # These will be transformed by format_value, and inserted into db as [null]
    for sheet in data.keys():
        data[sheet] = data[sheet].where(pandas.notnull(data[sheet]), None)
    #
    # Insert DATASET
    #
    logging.info("Inserting data")
    logging.info(" * dataset")
    dataset = insert_dataset(data['dataset'], mapping, cursor)
    #
    # Insert EVENTS
    #
    # Get 'event_pid' from dataset and add as new column
    data['event'] = data['event'].assign(dataset_pid=lambda _: dataset)
    logging.info(" * event")
    data['event'] = insert_events(data['event'], mapping, cursor, batch_size)
    #
    # Insert MIXS
    #
    # Join with 'event' to get 'event_pid' as 'pid'
    events = data['event'].set_index('event_id_alias')
    data['mixs'] = data['mixs'].join(events['pid'], on='event_id_alias')
    logging.info(" * mixs")
    insert_common(data['mixs'], mapping['mixs'], cursor, batch_size)
    #
    # Insert EMOF
    #
    # Join with 'event' to get 'event_pid'
    data['emof'] = data['emof'] \
        .join(events['pid'], on='event_id_alias')
    data['emof'].rename(columns={'pid': 'event_pid'}, inplace=True)
    logging.info(" * emof")
    insert_common(data['emof'], mapping['emof'], cursor, batch_size)
    #
    # Insert ASV
    #
    # Generate 'asv_id' as ASV:<md5-checksum of 'DNA_sequence'>
    data['asv']['asv_id'] = [f'ASV:{hashlib.md5(s.encode()).hexdigest()}'
                             for s in data['asv']['DNA_sequence']]
    logging.info(" * asvs")
    data['asv'], old_max_asv = insert_asvs(data['asv'], mapping,
                                           cursor, batch_size)
    # Drop asv_id column again, as it confuses pandas
    del data['asv']['asv_id']
    #
    # Insert TAXON_ANNOTATION
    #
    # Join with asv to add 'asv_pid'
    asvs = data['asv'].set_index('asv_id_alias')
    # Use inner join so that annotation is only added for new asvs
    data['annotation'] = data['annotation'] \
        .join(asvs['pid'], on='asv_id_alias', how='inner')
    data['annotation'].rename(columns={'pid': 'asv_pid'}, inplace=True)
    annotation = data['annotation'][data['annotation'].asv_pid > old_max_asv]
    annotation.reset_index(inplace=True)
    logging.info(" * annotations")
    insert_common(annotation, mapping['annotation'], cursor, batch_size)
    #
    # Insert OCCURRENCE
    #
    # Join with asvs to add 'asv_pid'
    occurrences = data['occurrence'].join(asvs['pid'], on='asv_id_alias')
    occurrences.rename(columns={'pid': 'asv_pid'}, inplace=True)
    # Set contributor´s taxon ranks to empty strings
    # to allow for concatenation
    tax_fields = ["kingdom", "phylum", "class", "order", "family", "genus",
                  "specificEpithet", "infraspecificEpithet", "otu"]
    occurrences[tax_fields] = occurrences[tax_fields].fillna('')
    # Join with events to add 'event_pid'
    occurrences = occurrences.join(events, on='event_id_alias')
    occurrences.rename(columns={'pid': 'event_pid'}, inplace=True)
    # Concatenate contributor´s taxon rank fields
    occurrences['previous_identifications'] = \
        ["|".join(z) for z in zip(*[occurrences[f] for f in tax_fields])]
    logging.info(" * occurrences")
    insert_common(occurrences, mapping['occurrence'], cursor, batch_size)
    #
    # Commit or Roll back
    #
    if dry_run:
        logging.info("Dry run, rolling back changes")
        connection.rollback()
    else:
        logging.info("Committing changes")
        connection.commit()
def run_validation(data: PandasDict, mapping: dict):
    """
    Uses `mapping` to run regexp validation of the fields in data.

    Every field that has a 'validation' pattern must fully match it for
    each value in the corresponding sheet.  Returns True when everything
    passes.  Exits the process if a pattern itself cannot be compiled.
    """
    valid = True
    for sheet, fields in mapping.items():
        logging.info(" * %s", sheet)
        for field, settings in fields.items():
            previous_mistake = False
            if 'validation' not in settings:
                continue
            pattern = settings['validation']
            try:
                validator = re.compile(pattern)
            except re.error as err:
                logging.error('Seems to be something wrong with a regular '
                              'expression used in validation. Please check '
                              'data-mapping.json.\nPython says: "%s"', err)
                sys.exit(1)
            for value in data[sheet][field]:
                if validator.fullmatch(str(value)):
                    continue
                valid = False
                # Header lines are only printed once per field.
                if not previous_mistake:
                    logging.warning(" - malformed value for %s", field)
                    logging.warning(' - validator: "%s"', pattern)
                    previous_mistake = True
                logging.warning("offending value: %s", value)
    if valid:
        logging.info("Validation successful")
    else:
        logging.error("Validation failed")
    return valid
def update_defaults(data: PandasDict, mapping: dict):
    """
    Uses the `mapping` dict to set default values in `data`.

    A field that is missing entirely from a sheet is created and filled
    with its default on every row; an existing field only has its NaN
    cells replaced.
    """
    for sheet, fields in mapping.items():
        logging.info(" * %s", sheet)
        for field, settings in fields.items():
            if 'default' not in settings:
                continue
            default = settings['default']
            if field not in data[sheet]:
                # Field absent from input: add it with the default value.
                data[sheet][field] = [default] * len(data[sheet].values)
            else:
                # Field present: only fill the empty (NaN) cells.
                data[sheet][field].fillna(value=default, inplace=True)
def compare_sheets(data: PandasDict, sheet1: str, sheet2: str, field1: str,
                   field2: str = None):
    """
    Compares full sets of values for corresponding fields in different
    sheets; returns False (after logging the difference) if `sheet1`
    contains values that `sheet2` lacks.
    """
    field2 = field2 or field1
    missing = set(data[sheet1][field1]) - set(data[sheet2][field2])
    if missing:
        logging.error('%s value(s) %s in %s sheet not present in %s sheet.',
                      field1, missing, sheet1, sheet2)
        return False
    return True
def compare_aliases(data: PandasDict):
    """
    Compares sets of key fields between sheets, and returns False if
    there is any difference.
    """
    nodiff = True
    # Every event referenced by a dependent sheet must exist in the event
    # sheet.
    for dependent in ['mixs', 'emof', 'occurrence']:
        nodiff &= compare_sheets(data, dependent, 'event', 'event_id_alias')
    # Every event must have at least one occurrence.
    nodiff &= compare_sheets(data, 'event', 'occurrence', 'event_id_alias')
    # Every ASV must be annotated.
    nodiff &= compare_sheets(data, 'asv', 'annotation', 'asv_id_alias')
    return nodiff
def compare_fields(data: PandasDict, mapping: dict):
    """
    Combines booleans from set comparisons of field names, and returns
    False if mapping and data input disagree in either direction.
    """
    nodiff = True
    # Fields produced by the importer itself, never expected in the input.
    not_in_input = [
        'status', 'targetTable', 'asv_pid',
        'dataset_pid', 'pid', 'event_pid', 'asv_id',
        'previous_identifications', 'event_pid'
    ]
    # Check if any mapping fields are missing from data input
    for sheet in mapping.keys():
        wanted = {field for field in mapping[sheet].keys()
                  if field not in not_in_input}
        diff = wanted.difference(set(data[sheet].keys()))
        if diff:
            logging.error(f'Fields {diff} are missing from sheet {sheet}.')
            nodiff &= False
    # Check if any input fields are missing from mapping.
    # Ignore fields that are always expected to be missing, e.g.
    # unpivoted event fields from asv-table - which are dataset-specific.
    events = data['occurrence']['event_id_alias'].tolist()
    # Fields used for deriving db fields, or that are moved to derived sheets
    expected = ['event_id_alias', 'associatedSequences', 'DNA_sequence',
                'asv_sequence', 'asv_id_alias', 'order', 'phylum', 'kingdom',
                'class', 'family', 'genus', 'infraspecificEpithet',
                'index', 'otu', 'specificEpithet']
    for sheet in data.keys():
        present = {field for field in data[sheet].keys()
                   if field not in events + expected}
        diff = present.difference(set(mapping[sheet].keys()))
        if diff:
            logging.error(f'Fields {diff} not in mapping.')
            nodiff &= False
    return nodiff
if __name__ == '__main__':
    import argparse

    PARSER = argparse.ArgumentParser(description=__doc__)
    PARSER.add_argument('--dry-run', action='store_true',
                        help=("Performs all transactions, but then issues a "
                              "rollback to the database so that it remains "
                              "unaffected. This will still increment "
                              "id sequences."))
    PARSER.add_argument('--batch_size', type=int, default=100,
                        help=("Sets the max number of rows to be inserted for "
                              "each insert query."))
    PARSER.add_argument('--mapping_file', default=DEFAULT_MAPPING,
                        help=("Sets the data mapping file to use for field "
                              "mapping and validation."))
    PARSER.add_argument('--no-validation', action="store_true",
                        help="Do NOT validate the data before insertion.")
    PARSER.add_argument('-v', '--verbose', action="count", default=0,
                        help="Increase logging verbosity (default: warning).")
    PARSER.add_argument('-q', '--quiet', action="count", default=3,
                        help="Decrease logging verbosity (default: warning).")

    ARGS = PARSER.parse_args()

    # Set log level based on ./scripts/import_excel argument
    # E.g: --v means log level = 10(3-2) = 10
    logging.basicConfig(level=(10*(ARGS.quiet - ARGS.verbose)))

    # Check if there is streaming data available from stdin
    # (used in case importer is not executed via import_excel.py)
    if not select.select([sys.stdin], [], [], 0.0)[0]:
        logging.error("An excel input stream is required")
        PARSER.print_help()
        sys.exit(1)

    # Write stdin to a temporary file, since the readers need a seekable
    # file path rather than a stream.
    with tempfile.NamedTemporaryFile('rb+') as temp:
        temp.write(sys.stdin.buffer.raw.read())
        run_import(temp.name, ARGS.mapping_file, ARGS.batch_size,
                   # --no_validation -> not True = False
                   not ARGS.no_validation, ARGS.dry_run)
| 34.867688 | 79 | 0.602237 | #!/usr/bin/env python3
"""
The mol-mod data importer takes an Excel or Tar file stream, then rearranges
and inserts the data into the database. The script is executed inside a running
container using the import_excel.py wrapper.
"""
import hashlib
import json
import logging
import os
import re
import select
import sys
import tarfile
import tempfile
from collections import OrderedDict
from datetime import date
from io import BytesIO
from typing import List, Mapping, Optional
import pandas
import psycopg2
from psycopg2.extras import DictCursor
DEFAULT_MAPPING = os.path.join(os.path.dirname(__file__), 'data-mapping.json')
# Define pandas dict of sheets type. This is what's returned from read_excel()
PandasDict = Mapping[str, pandas.DataFrame]
def as_snake_case(text: str) -> str:
    """
    Converts CamelCase to snake_case.

    As a special case, this function converts `ID` to `_id` instead of `_i_d`.
    """
    pieces = []
    for index, char in enumerate(text):
        inner_upper = char.isupper() and index != 0
        # Keep "ID" together so e.g. eventID becomes event_id, not event_i_d.
        d_after_i = char == 'D' and text[index - 1] == 'I'
        if inner_upper and not d_after_i:
            pieces.append("_")
        pieces.append(char.lower())
    return "".join(pieces)
def connect_db(pass_file='/run/secrets/postgres_pass'):
    """
    Uses environment variables to set postgres connection settings, and
    creates a database connection. A simple query to list datasets is then
    used to verify the connection.

    Args:
        pass_file: Path of a file holding the database password
            (a docker secret by default).

    Returns:
        (connection, cursor) tuple; the cursor uses DictCursor so result
        rows can be accessed by column name.

    Exits the process (status 1) if the password file is missing or the
    connection / verification query fails.
    """
    try:
        with open(pass_file) as password:
            password = password.read()
    except FileNotFoundError:
        logging.error("Could not read postgres pwd from %s", pass_file)
        sys.exit(1)
    try:
        connection = psycopg2.connect(
            user=os.getenv('POSTGRES_USER', 'psql'),
            password=password,
            database=os.getenv('POSTGRES_DB', 'db'),
            host=os.getenv('POSTGRES_HOST', 'localhost'),
            port=os.getenv('POSTGRES_PORT', '5432')
        )
        logging.info("Connected to PostgreSQL database")
        cursor = connection.cursor(cursor_factory=DictCursor)
        # Cheap query to prove the connection actually works.
        cursor.execute("SELECT * FROM public.dataset;")
        logging.debug("Database connection verified")
    except psycopg2.OperationalError as err:
        logging.error("Could not connect to postgres database")
        logging.error(err)
        sys.exit(1)
    return connection, cursor
def get_base_query(table_mapping: dict):
    """
    Creates an SQL insert query base using the given `table_mapping`.

    Returns a tuple of the query prefix (``INSERT INTO table (cols)``) and
    an ordered mapping from source field name to quoted column name.  Note
    that the returned query is not complete: it has no VALUES clause.
    """
    field_map = OrderedDict(
        (field, f'"{settings.get("field", as_snake_case(field))}"')
        for field, settings in table_mapping.items()
        if field not in ['targetTable']
    )
    column_list = ", ".join(field_map.values())
    return f"INSERT INTO {table_mapping['targetTable']} ({column_list})", field_map
def format_value(value):
    """
    Render a single `value` as an SQL literal for use in INSERT queries.

    None becomes NULL, strings and dates are wrapped in single quotes,
    and anything else passes through unchanged.

    NOTE(review): embedded single quotes are not escaped here; callers
    appear to pre-escape them. Parameterized queries would be safer.
    """
    if value is None:
        return 'NULL'
    if isinstance(value, (str, date)):
        return f"'{value}'"
    return value
def format_values(data: pandas.DataFrame, mapping: dict,
                  start: int = 0, end: Optional[int] = 0) -> str:
    """
    Format rows `start` (inclusive) to `end` (exclusive) of `data` as SQL
    VALUES tuples — one ``(v1, v2, ...)`` string per row — using the fields
    listed in `mapping`.

    NOTE(review): the annotated return type is `str` but a list of strings
    is returned; the signature is preserved as-is.
    """
    rows = []
    for index in range(start, end):
        formatted = [format_value(data[field][index]) for field in mapping]
        rows.append(f'({", ".join(map(str, formatted))})')
    return rows
def insert_common(data: pandas.DataFrame, mapping: dict, db_cursor: DictCursor,
                  batch_size: int = 1000):
    """
    Inserts `data` into the database based on the given `mapping`.

    Rows are sent in windows of at most `batch_size` per INSERT query.
    Exits the process on any database error; committing or rolling back
    the transaction is left to the caller.
    """
    base_query, field_mapping = get_base_query(mapping)
    total = len(data.values)
    # Walk the frame in [start, end) windows of batch_size rows.
    start = 0
    end = min(total, batch_size)
    while start < total:
        logging.info(" * inserting %s to %s", start, end)
        values = format_values(data, field_mapping, start, end)
        query = f"{base_query} VALUES {', '.join(values)};"
        try:
            db_cursor.execute(query)
        except psycopg2.Error as err:
            logging.error(err)
            logging.error("No data were imported.")
            sys.exit(1)
        start = end
        end = min(total, end + batch_size)
def insert_dataset(data: pandas.DataFrame, mapping: dict,
                   db_cursor: DictCursor) -> int:
    """
    Inserts a single dataset into the database, and returns the database `pid`.

    Exits if `data` does not contain exactly one row, or on any database
    error.
    """
    base_query, field_mapping = get_base_query(mapping['dataset'])
    if len(data.values) != 1:
        logging.error("There must be exactly one dataset to insert")
        sys.exit(1)
    values = format_values(data, field_mapping, 0, 1)
    # RETURNING pid lets us read back the generated primary key.
    query = f"{base_query} VALUES {', '.join(values)} RETURNING pid;"
    try:
        db_cursor.execute(query)
    except psycopg2.Error as err:
        logging.error(err)
        logging.error("No data were imported.")
        sys.exit(1)
    return db_cursor.fetchall()[0]['pid']
def insert_events(data: pandas.DataFrame, mapping: dict, db_cursor: DictCursor,
                  batch_size: int = 1000) -> pandas.DataFrame:
    """
    Inserts sampling events in batches, returning the given dataframe with
    a new `pid` column holding the database-generated keys (needed for
    joins later in the import).

    Exits the process on any database error.
    """
    base_query, field_mapping = get_base_query(mapping['event'])
    total = len(data.values)
    start = 0
    end = min(total, batch_size)
    pids = []
    while start < total:
        logging.info(" * inserting %s to %s", start, end)
        values = format_values(data, field_mapping, start, end)
        query = f"{base_query} VALUES {', '.join(values)} RETURNING pid;"
        try:
            db_cursor.execute(query)
            # Collect the generated primary keys for this batch.
            pids += [r['pid'] for r in db_cursor.fetchall()]
        except psycopg2.Error as err:
            logging.error(err)
            logging.error("No data were imported.")
            sys.exit(1)
        start = end
        end = min(total, end + batch_size)
    # assign pids to data for future joins
    return data.assign(pid=pids)
def insert_asvs(data: pandas.DataFrame, mapping: dict, db_cursor: DictCursor,
                batch_size: int = 1000) -> (pandas.DataFrame, int):
    """
    Inserts asv's into the database, returning the database `pid`'s. Unlike the
    other categories asv conflicts returns the id of the previously registered
    entry.

    Returns a tuple of (dataframe with new `pid` column, highest pid that
    existed before the insert). The caller can use the old maximum to tell
    newly inserted asvs apart from pre-existing ones.
    """
    base_query, field_mapping = get_base_query(mapping['asv'])
    total = len(data.values)
    start = 0
    end = min(total, batch_size)
    # get max asv_id before insert (this helps us figure out which asv's were
    # already in the database).
    db_cursor.execute("SELECT MAX(pid) FROM asv;")
    old_max_pid = db_cursor.fetchone()[0]
    pids = []
    while start < total:
        logging.info(" * inserting %s to %s", start, end)
        values = format_values(data, field_mapping, start, end)
        # The DO UPDATE is a no-op write whose purpose is to make
        # RETURNING yield the existing row's pid on a duplicate sequence.
        query = f"{base_query} VALUES {', '.join(values)} " + \
                "ON CONFLICT (asv_sequence) DO UPDATE SET pid = asv.pid " + \
                "RETURNING pid;"
        try:
            db_cursor.execute(query)
            pids += [r['pid'] for r in db_cursor.fetchall()]
        except psycopg2.Error as err:
            logging.error(err)
            logging.error("No data were imported.")
            sys.exit(1)
        start = end
        end = min(total, end + batch_size)
    # assign pids to data for future joins
    # MAX(pid) is NULL (None) on an empty table, hence the `or 0`.
    return data.assign(pid=pids), old_max_pid or 0
def read_data_file(data_file: str, sheets: List[str]):
    """
    Opens and reads the given `sheets` from `data_file`. `data_file` must be a
    valid excel or tar file.

    Returns a dict mapping sheet name -> DataFrame. The 'asv' and
    'occurrence' sheets are skipped here because they are derived later
    from the 'asv-table' sheet. Exits on unreadable input or a missing
    sheet/archive member.
    """
    # Check input file format
    is_tar = tarfile.is_tarfile(data_file)
    if is_tar:
        tar = tarfile.open(data_file)
    else:
        try:
            pandas.read_excel(data_file)
        except (ValueError, KeyError):
            logging.error("Input neither recognized as tar nor as Excel.")
            sys.exit(1)
    data = {}
    # Read one sheet at the time, to catch any missing sheets
    for sheet in sheets:
        # Skip occurrences and asvs, as they are taken from asv-table sheet
        if sheet in ['asv', 'occurrence']:
            continue
        try:
            if is_tar:
                # Find correct file in tar archive
                content = None
                for member in tar:
                    # Ignore parent dir, if any
                    member_name = os.path.basename(member.name)
                    if member_name.split('.')[0] == sheet:
                        csv_file = tar.extractfile(member)
                        content = BytesIO(csv_file.read())
                        csv_file.close()
                if not content:
                    # Treat a missing archive member like a missing sheet.
                    raise KeyError
                try:
                    data[sheet] = pandas.read_csv(content)
                except Exception:
                    logging.error("Input file '%s' could not be read. "
                                  "Please inpect file.", member.name)
                    sys.exit(1)
            else:
                data[sheet] = pandas.read_excel(data_file, sheet_name=sheet)
        except KeyError:
            logging.error("Input sheet '%s' not found. Aborting.", sheet)
            sys.exit(1)
    logging.info("%s file read", "Tar" if is_tar else "Excel")
    for sheet in data:
        # Drop empty rows and columns, if any
        data[sheet] = data[sheet].dropna(how='all')
        data[sheet] = data[sheet].drop(data[sheet].filter(regex="Unnamed"),
                                       axis='columns')
    # Drop 'domain' column if e.g. ampliseq has included that
    for sheet in ['asv-table', 'annotation']:
        data[sheet] = data[sheet].drop(columns=['domain'], errors='ignore')
    return data
def handle_dates(dates: pandas.Series):
    """
    Remove time digits (e.g. 00:00:00) from an (Excel) date/timestamp
    Series, as they mess up validation. A Series without a datetime
    accessor (e.g. plain text) is returned unchanged.
    """
    try:
        return dates.dt.date
    except AttributeError:
        # Plain-text column: nothing to strip.
        return dates
def run_import(data_file: str, mapping_file: str, batch_size: int = 1000,
               validate: bool = True, dry_run: bool = False):
    """
    Inserts the data from data_file into the database using the mapping_file.

    Everything runs in one transaction: dataset, events, mixs, emof, asvs,
    annotations and occurrences are inserted in that order, then the
    transaction is committed (or rolled back when `dry_run` is set).
    Validation failures and database errors log a message and exit the
    process without committing.
    """
    logging.info("Connecting to database")
    connection, cursor = connect_db()
    logging.info("Loading mapping file")
    try:
        mapping = json.load(open(mapping_file))
    except json.decoder.JSONDecodeError as err:
        filename = os.path.basename(mapping_file)
        # Bug fix: report the offending file's name -- `filename` was
        # computed but the message printed a '(unknown)' placeholder.
        logging.error(f'There is an error in {filename}: {err}')
        sys.exit(1)
    logging.info("Loading data file")
    data = read_data_file(data_file, list(mapping.keys()))
    #
    # Derive occurrence and asv 'sheets' from asv-table sheet.
    #
    # We do this already here, to include asv and occurrence fields subsequent
    # validation (which expects 'unpivoted' rows). This means, however,
    # that asv-table defaults (added in data-mapping.json) will have no effects
    # on occurrences or asvs.
    #
    try:
        # 'Unpivot' event columns into rows, keeping 'id_columns' as columns
        id_columns = ['asv_id_alias', 'DNA_sequence', 'associatedSequences',
                      'kingdom', 'phylum', 'class', 'order', 'family', 'genus',
                      'specificEpithet', 'infraspecificEpithet', 'otu']
        occurrences = data['asv-table'] \
            .melt(id_columns,
                  # Store event column header and values as:
                  var_name='event_id_alias',
                  value_name='organism_quantity')
    except KeyError:
        logging.error('Input files seem to not have been read properly. '
                      'Please, check dimensions (#rows, #cols) below:')
        for sheet in ['dataset', 'emof', 'mixs', 'asv-table', 'annotation']:
            logging.error(f'Sheet {sheet} has dimensions {data[sheet].shape}')
        logging.error('Excel files exported from R have caused this problem '
                      'before. Try opening and saving input in Excel, '
                      'or importing data as *.tar.gz instead.')
        sys.exit(1)
    # Remove rows with organism_quantity 0,
    # and reset index so that removed rows are no longer referenced
    # As we do this before validation, we need to catch potential TypeError
    try:
        occurrences = occurrences[occurrences.organism_quantity > 0]
    except TypeError:
        logging.error('Counts in asv-table include non-numeric values. '
                      'No data were imported.')
        sys.exit(1)
    else:
        occurrences.reset_index(inplace=True)
    # Store as 'sheet' in data object
    data['occurrence'] = occurrences
    # Also create asv 'sheet'
    data['asv'] = occurrences[['asv_id_alias', 'DNA_sequence']]
    # Make sure we have unique asv rows,
    # to avoid ON CONFLICT - DO UPDATE errors in insert_asvs
    data['asv'] = data['asv'].drop_duplicates()
    data['asv'].reset_index(inplace=True)
    # Check for field differences between data input and mapping
    logging.info("Checking fields")
    if not compare_fields(data, mapping):
        logging.error('No data were imported.')
        sys.exit(1)
    # Deal with Excel timestamps
    # Requires date fields to exist, so do not move ahead of field check!
    data['event']['eventDate'] = handle_dates(data['event']['eventDate'])
    data['annotation']['date_identified'] = \
        handle_dates(data['annotation']['date_identified'])
    if validate:
        logging.info("Validating input data")
        if not run_validation(data, mapping):
            logging.error("No data were imported.")
            sys.exit(1)
        if not compare_aliases(data):
            logging.error("No data were imported.")
            sys.exit(1)
    logging.info("Updating defaults")
    update_defaults(data, mapping)
    # Replace remaining missing values with None.
    # These will be transformed by format_value, and inserted into db as [null]
    for sheet in data.keys():
        data[sheet] = data[sheet].where(pandas.notnull(data[sheet]), None)
    #
    # Insert DATASET
    #
    logging.info("Inserting data")
    logging.info(" * dataset")
    dataset = insert_dataset(data['dataset'], mapping, cursor)
    #
    # Insert EVENTS
    #
    # Get 'event_pid' from dataset and add as new column
    data['event'] = data['event'].assign(dataset_pid=lambda _: dataset)
    logging.info(" * event")
    data['event'] = insert_events(data['event'], mapping, cursor, batch_size)
    #
    # Insert MIXS
    #
    # Join with 'event' to get 'event_pid' as 'pid'
    events = data['event'].set_index('event_id_alias')
    data['mixs'] = data['mixs'].join(events['pid'], on='event_id_alias')
    logging.info(" * mixs")
    insert_common(data['mixs'], mapping['mixs'], cursor, batch_size)
    #
    # Insert EMOF
    #
    # Join with 'event' to get 'event_pid'
    data['emof'] = data['emof'] \
        .join(events['pid'], on='event_id_alias')
    data['emof'].rename(columns={'pid': 'event_pid'}, inplace=True)
    logging.info(" * emof")
    insert_common(data['emof'], mapping['emof'], cursor, batch_size)
    #
    # Insert ASV
    #
    # Generate 'asv_id' as ASV:<md5-checksum of 'DNA_sequence'>
    data['asv']['asv_id'] = [f'ASV:{hashlib.md5(s.encode()).hexdigest()}'
                             for s in data['asv']['DNA_sequence']]
    logging.info(" * asvs")
    data['asv'], old_max_asv = insert_asvs(data['asv'], mapping,
                                           cursor, batch_size)
    # Drop asv_id column again, as it confuses pandas
    del data['asv']['asv_id']
    #
    # Insert TAXON_ANNOTATION
    #
    # Join with asv to add 'asv_pid'
    asvs = data['asv'].set_index('asv_id_alias')
    # Use inner join so that annotation is only added for new asvs
    data['annotation'] = data['annotation'] \
        .join(asvs['pid'], on='asv_id_alias', how='inner')
    data['annotation'].rename(columns={'pid': 'asv_pid'}, inplace=True)
    annotation = data['annotation'][data['annotation'].asv_pid > old_max_asv]
    annotation.reset_index(inplace=True)
    logging.info(" * annotations")
    insert_common(annotation, mapping['annotation'], cursor, batch_size)
    #
    # Insert OCCURRENCE
    #
    # Join with asvs to add 'asv_pid'
    occurrences = data['occurrence'].join(asvs['pid'], on='asv_id_alias')
    occurrences.rename(columns={'pid': 'asv_pid'}, inplace=True)
    # Set contributor´s taxon ranks to empty strings
    # to allow for concatenation
    tax_fields = ["kingdom", "phylum", "class", "order", "family", "genus",
                  "specificEpithet", "infraspecificEpithet", "otu"]
    occurrences[tax_fields] = occurrences[tax_fields].fillna('')
    # Join with events to add 'event_pid'
    occurrences = occurrences.join(events, on='event_id_alias')
    occurrences.rename(columns={'pid': 'event_pid'}, inplace=True)
    # Concatenate contributor´s taxon rank fields
    occurrences['previous_identifications'] = \
        ["|".join(z) for z in zip(*[occurrences[f] for f in tax_fields])]
    logging.info(" * occurrences")
    insert_common(occurrences, mapping['occurrence'], cursor, batch_size)
    #
    # Commit or Roll back
    #
    if dry_run:
        logging.info("Dry run, rolling back changes")
        connection.rollback()
    else:
        logging.info("Committing changes")
        connection.commit()
def run_validation(data: PandasDict, mapping: dict):
    """
    Validate every mapped field in `data` against the regular expression
    given under its 'validation' key in `mapping`.

    Returns True when every value matches, False otherwise. Exits the
    process if a validation regex itself cannot be compiled.
    """
    all_ok = True
    for sheet, fields in mapping.items():
        logging.info(" * %s", sheet)
        for field, settings in fields.items():
            if 'validation' not in settings:
                continue
            try:
                pattern = re.compile(settings['validation'])
            except re.error as err:
                logging.error('Seems to be something wrong with a regular '
                              'expression used in validation. Please check '
                              'data-mapping.json.\nPython says: "%s"', err)
                sys.exit(1)
            reported = False
            for value in data[sheet][field]:
                if pattern.fullmatch(str(value)):
                    continue
                all_ok = False
                # Print the field/validator header only once per field.
                if not reported:
                    logging.warning(" - malformed value for %s", field)
                    logging.warning(' - validator: "%s"',
                                    settings['validation'])
                    reported = True
                logging.warning("offending value: %s", value)
    if all_ok:
        logging.info("Validation successful")
    else:
        logging.error("Validation failed")
    return all_ok
def update_defaults(data: PandasDict, mapping: dict):
    """
    Apply the default values declared in `mapping` to `data`, in place.

    A mapped field that is absent from the input sheet is created and
    filled entirely with its default; an existing field only has its
    missing (NaN) cells filled.
    """
    for sheet, fields in mapping.items():
        logging.info(" * %s", sheet)
        for field, settings in fields.items():
            if 'default' not in settings:
                continue
            default = settings['default']
            if field in data[sheet]:
                # Fill only the NaN cells of the existing column.
                data[sheet][field].fillna(value=default, inplace=True)
            else:
                # Field missing from the input form: default on every row.
                data[sheet][field] = [default] * len(data[sheet].values)
def compare_sheets(data: PandasDict, sheet1: str, sheet2: str, field1: str,
                   field2: str = None):
    """
    Check that every value of `field1` in `sheet1` also occurs as `field2`
    in `sheet2` (`field2` defaults to `field1`).

    Logs the missing values and returns False on any difference; returns
    True otherwise.
    """
    field2 = field2 or field1
    missing = set(data[sheet1][field1]) - set(data[sheet2][field2])
    if missing:
        logging.error('%s value(s) %s in %s sheet not present in %s sheet.',
                      field1, missing, sheet1, sheet2)
        return False
    return True
def compare_aliases(data: PandasDict):
    """
    Cross-check the key (alias) fields between sheets.

    Returns False if any dependent sheet references an event missing from
    the event sheet, if any event lacks occurrences, or if any asv lacks
    an annotation; True otherwise.
    """
    checks = [
        # Events referenced by dependent sheets must exist in 'event'
        ('mixs', 'event', 'event_id_alias'),
        ('emof', 'event', 'event_id_alias'),
        ('occurrence', 'event', 'event_id_alias'),
        # Every event must have at least one occurrence
        ('event', 'occurrence', 'event_id_alias'),
        # Every asv must have an annotation
        ('asv', 'annotation', 'asv_id_alias'),
    ]
    ok = True
    for sheet1, sheet2, field in checks:
        ok &= compare_sheets(data, sheet1, sheet2, field)
    return ok
def compare_fields(data: PandasDict, mapping: dict):
    """
    Check that mapping and input data agree on the fields present.

    Returns False if any mapped field is missing from the input sheets, or
    if any input field is absent from the mapping; True otherwise.
    """
    ok = True
    # Mapping fields that are derived or db-internal, never expected in
    # the input.
    internal = {'status', 'targetTable', 'asv_pid', 'dataset_pid', 'pid',
                'event_pid', 'asv_id', 'previous_identifications'}
    for sheet in mapping:
        expected_fields = {field for field in mapping[sheet]
                           if field not in internal}
        missing = expected_fields - set(data[sheet].keys())
        if missing:
            logging.error(f'Fields {missing} are missing from sheet {sheet}.')
            ok = False
    # Input fields that are always expected to be missing from the mapping:
    # unpivoted event columns from the asv-table (dataset-specific) ...
    events = data['occurrence']['event_id_alias'].tolist()
    # ... plus fields used for deriving db fields, or moved to derived sheets.
    expected = ['event_id_alias', 'associatedSequences', 'DNA_sequence',
                'asv_sequence', 'asv_id_alias', 'order', 'phylum', 'kingdom',
                'class', 'family', 'genus', 'infraspecificEpithet',
                'index', 'otu', 'specificEpithet']
    ignorable = set(events + expected)
    for sheet in data:
        present = {field for field in data[sheet].keys()
                   if field not in ignorable}
        unmapped = present - set(mapping[sheet].keys())
        if unmapped:
            logging.error(f'Fields {unmapped} not in mapping.')
            ok = False
    return ok
if __name__ == '__main__':
    import argparse
    PARSER = argparse.ArgumentParser(description=__doc__)
    PARSER.add_argument('--dry-run', action='store_true',
                        help=("Performs all transactions, but then issues a "
                              "rollback to the database so that it remains "
                              "unaffected. This will still increment "
                              "id sequences."))
    PARSER.add_argument('--batch_size', type=int, default=100,
                        help=("Sets the max number of rows to be inserted for "
                              "each insert query."))
    PARSER.add_argument('--mapping_file', default=DEFAULT_MAPPING,
                        help=("Sets the data mapping file to use for field "
                              "mapping and validation."))
    PARSER.add_argument('--no-validation', action="store_true",
                        help="Do NOT validate the data before insertion.")
    PARSER.add_argument('-v', '--verbose', action="count", default=0,
                        help="Increase logging verbosity (default: warning).")
    PARSER.add_argument('-q', '--quiet', action="count", default=3,
                        help="Decrease logging verbosity (default: warning).")
    ARGS = PARSER.parse_args()
    # Set log level based on ./scripts/import_excel argument
    # E.g: --v means log level = 10(3-2) = 10
    logging.basicConfig(level=(10*(ARGS.quiet - ARGS.verbose)))
    # Check if there is streaming data available from stdin
    # (used in case importer is not executed via import_excel.py)
    if not select.select([sys.stdin], [], [], 0.0)[0]:
        logging.error("An excel input stream is required")
        PARSER.print_help()
        sys.exit(1)
    # Write stdin to a temporary file
    # (run_import re-opens the data by path, so no seek(0) is needed here)
    with tempfile.NamedTemporaryFile('rb+') as temp:
        temp.write(sys.stdin.buffer.raw.read())
        run_import(temp.name, ARGS.mapping_file, ARGS.batch_size,
                   # --no_validation -> not True = False
                   not ARGS.no_validation, ARGS.dry_run)
| 0 | 0 | 0 |
ae9655bedf99d9ec44295bb869861c3240b8f1cb | 4,013 | py | Python | school/views/school_crc_views.py | tnemisteam/cdf-steps | 78896eebd08ba9975a2dece97f73dca9aa781238 | [
"MIT"
] | null | null | null | school/views/school_crc_views.py | tnemisteam/cdf-steps | 78896eebd08ba9975a2dece97f73dca9aa781238 | [
"MIT"
] | null | null | null | school/views/school_crc_views.py | tnemisteam/cdf-steps | 78896eebd08ba9975a2dece97f73dca9aa781238 | [
"MIT"
] | null | null | null | from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from school.models import School_crc
| 27.868056 | 74 | 0.711189 | from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from school.models import School_crc
class School_crcView(object):
    """Base mixin for all School_crc views; binds them to the model and
    rewrites the default template path to a school_crc/ subdirectory."""
    model = School_crc
    def get_template_names(self):
        """Nest templates within school_crc directory."""
        tpl = super(School_crcView, self).get_template_names()[0]
        # NOTE(review): `app` and `mdl` are unused since the replace()
        # call below was commented out; kept for reference.
        app = self.model._meta.app_label
        mdl = 'school_crc'
        #self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
        # Splice 'school_crc/' into the template path after its first
        # seven characters (the 'school/' app prefix).
        self.template_name = tpl[:7]+'school_crc/'+tpl[7:]
        return [self.template_name]
class School_crcDateView(School_crcView):
    """Mixin for date-based School_crc archive views."""
    # Archive views key on the model's 'timestamp' field.
    date_field = 'timestamp'
    month_format = '%m'
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        # NOTE(review): django.core.urlresolvers is the pre-Django-2.0
        # location of reverse(); django.urls replaces it.
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcBaseListView(School_crcView):
    """Mixin for School_crc list-style views; pages results 10 per page."""
    paginate_by = 10
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcArchiveIndexView(
        School_crcDateView, School_crcBaseListView, ArchiveIndexView):
    """Date-based archive index of School_crc records."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcCreateView(School_crcView, CreateView):
    """Create view for School_crc records."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after saving."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcDateDetailView(School_crcDateView, DateDetailView):
    """Date-scoped detail view for a single School_crc record."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcDayArchiveView(
        School_crcDateView, School_crcBaseListView, DayArchiveView):
    """Daily archive of School_crc records."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcDeleteView(School_crcView, DeleteView):
    """Delete view for School_crc records."""
    def get_success_url(self):
        """Redirect to the School_crc list view after deletion."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcDetailView(School_crcView, DetailView):
    """Detail view for a single School_crc record."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcListView(School_crcBaseListView, ListView):
    """Paginated list view of School_crc records."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcMonthArchiveView(
        School_crcDateView, School_crcBaseListView, MonthArchiveView):
    """Monthly archive of School_crc records."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcTodayArchiveView(
        School_crcDateView, School_crcBaseListView, TodayArchiveView):
    """Archive of School_crc records created today."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcUpdateView(School_crcView, UpdateView):
    """Update view for School_crc records."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after saving."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcWeekArchiveView(
        School_crcDateView, School_crcBaseListView, WeekArchiveView):
    """Weekly archive of School_crc records."""
    # Fix: removed the dead `pass` statement that preceded the method.
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
class School_crcYearArchiveView(
        School_crcDateView, School_crcBaseListView, YearArchiveView):
    """Yearly archive of School_crc records."""
    # Django YearArchiveView option: include the object list in context.
    make_object_list = True
    def get_success_url(self):
        """Redirect to the School_crc list view after a successful action."""
        from django.core.urlresolvers import reverse
        return reverse('school_school_crc_list')
| 1,498 | 1,701 | 372 |
523087f5c9b541dd171447caeea6a7c8373464ea | 33,904 | py | Python | upddatedb_pi.py | LTAcosta/TBN-Plex | 35e9eaf20d38c4b2770070932b5b8f48ced1e6a3 | [
"MIT"
] | 2 | 2020-12-19T22:16:52.000Z | 2021-05-31T23:53:12.000Z | upddatedb_pi.py | LTAcosta/TBN-Plex | 35e9eaf20d38c4b2770070932b5b8f48ced1e6a3 | [
"MIT"
] | null | null | null | upddatedb_pi.py | LTAcosta/TBN-Plex | 35e9eaf20d38c4b2770070932b5b8f48ced1e6a3 | [
"MIT"
] | 1 | 2018-01-10T09:40:43.000Z | 2018-01-10T09:40:43.000Z | import urllib3
import subprocess
import requests
import time
import os
import sys
import sqlite3
import platform
#top
# Python 2/3 compatibility shim: alias input() to raw_input() on Python 2.
try:
    input = raw_input
except NameError:
    pass
# NOTE(review): `global` statements at module level are no-ops; kept as-is.
global favtv
global favmve
global tvgenres
global moviegenres
favtv = []
favmve = []
tvgenres = []
moviegenres = []
# NOTE(review): `homedir` is not defined anywhere in this chunk of the file
# -- presumably set by an earlier snippet; TODO confirm.
MYDB = homedir + "myplex.db"
http = urllib3.PoolManager()
sql = sqlite3.connect(MYDB)
cur = sql.cursor()
FIXME = homedir + "fixme.txt"
PROBLEMS = homedir + "problems.txt"
# Truncate the problems log at every run.
with open (PROBLEMS, "w") as file:
    file.write('')
    file.close()
ostype = platform.system()
# First-run bootstrap: prompt for any missing settings and cache them in
# the sqlite 'settings' table; otherwise read the cached values back.
cur.execute('CREATE TABLE IF NOT EXISTS settings(item TEXT, setting TEXT)')
sql.commit()
command = 'SELECT setting FROM settings WHERE item LIKE \'TVPART\''
cur.execute(command)
if not cur.fetchone():
    print ("Looks like you have never run the update DB script. I need some information to proceed.\n Enter the link to your metadata.\n Example: http://192.168.1.134:32400/library/metadata/\n")
    TVPART = str(input('Link:'))
    cur.execute('INSERT INTO settings VALUES(?, ?)', ("TVPART",TVPART.strip()))
    sql.commit()
    print (TVPART + " has been added to the settings table. Moving on.")
else:
    cur.execute(command)
    test2 = cur.fetchone()[0]
    TVPART = test2
command = 'SELECT setting FROM settings WHERE item LIKE \'TVGET\''
cur.execute(command)
if not cur.fetchone():
    print ("Enter the link to your TV show tree.\nExample: http://192.168.1.134:32400/library/sections/1/all/ \n")
    TVGET = str(input('Link:'))
    cur.execute('INSERT INTO settings VALUES(?, ?)', ("TVGET",TVGET.strip()))
    sql.commit()
    print (TVGET + " has been added to the settings table. Moving on.")
else:
    cur.execute(command)
    test1 = cur.fetchone()[0]
    TVGET = test1
command = 'SELECT setting FROM settings WHERE item LIKE \'MOVIEGET\''
cur.execute(command)
if not cur.fetchone():
    print ("Enter the link to your Movie tree.\nExample: http://192.168.1.134:32400/library/sections/2/all/ \n")
    MOVIEGET = str(input('Link:'))
    cur.execute('INSERT INTO settings VALUES(?, ?)', ("MOVIEGET",MOVIEGET.strip()))
    sql.commit()
    print (MOVIEGET + " has been added to the settings table. Moving on.")
else:
    cur.execute(command)
    test = cur.fetchone()[0]
    MOVIEGET = test
print ("Database update starting...\n")
# Content tables used by the updater routines.
cur.execute('CREATE TABLE IF NOT EXISTS shows(TShow TEXT, Episode TEXT, Season INT, Enum INT, Tnum INT, Summary TEXT, Link TEXT)')
sql.commit()
cur.execute('CREATE TABLE IF NOT EXISTS Movies(Movie TEXT, Summary TEXT, Rating TEXT, Tagline TEXT, Genre TEXT, Director TEXT, Actors TEXT)')
sql.commit()
cur.execute('CREATE TABLE IF NOT EXISTS TVshowlist(TShow TEXT, Summary TEXT, Genre TEXT, Rating TEXT, Duration INT, Totalnum INT)')
sql.commit()
#mark
# Command-line dispatch: choose which update routines to run. The update
# functions called here are defined elsewhere in this file.
try:
    if "Windows" not in ostype:
        option = str(sys.argv[1])
    else:
        print ("Notice: For Windows, the update db script may default to 'all' when there is an argument failure.\n")
        option = "all"
    if ("updatetv" in option):
        getgenrestv()
        startupactiontv()
        gettvshows()
        getshows()
        restoregenrestv()
    elif ("updatemovies" in option):
        getgenresmovie()
        startupactionmovie()
        getmovies()
        restoregenremovies()
    elif ("all" in option):
        getgenrestv()
        getgenresmovie()
        startupactiontv()
        startupactionmovie()
        gettvshows()
        getshows()
        getmovies()
        restoregenrestv()
        restoregenremovies()
    elif ("getcommercials" in option):
        getcommercials()
        print ("Commercial Get Finished.")
    elif ("getprerolls" in option):
        getprerolls()
        print ("Preroll Get Finished.")
# NOTE(review): a missing sys.argv[1] raises IndexError, not TypeError, so
# this handler may never fire for the case its message describes -- TODO
# confirm intent.
except TypeError:
    print ("No option specified. Use 'updatetv' or 'updatemovies' or 'all' to update your db.")
print ("Done")
| 26.039939 | 191 | 0.602259 | import urllib3
import subprocess
import requests
import time
import os
import sys
import sqlite3
import platform
#top
# NOTE(review): this span duplicates the bootstrap block that appears
# earlier in this dump (dataset content/original_content columns).
# Python 2/3 compatibility shim: alias input() to raw_input() on Python 2.
try:
    input = raw_input
except NameError:
    pass
# NOTE(review): `global` statements at module level are no-ops; kept as-is.
global favtv
global favmve
global tvgenres
global moviegenres
favtv = []
favmve = []
tvgenres = []
moviegenres = []
# NOTE(review): `homedir` is not defined anywhere in this chunk of the file
# -- presumably set by an earlier snippet; TODO confirm.
MYDB = homedir + "myplex.db"
http = urllib3.PoolManager()
sql = sqlite3.connect(MYDB)
cur = sql.cursor()
FIXME = homedir + "fixme.txt"
PROBLEMS = homedir + "problems.txt"
# Truncate the problems log at every run.
with open (PROBLEMS, "w") as file:
    file.write('')
    file.close()
ostype = platform.system()
# First-run bootstrap: prompt for any missing settings and cache them in
# the sqlite 'settings' table; otherwise read the cached values back.
cur.execute('CREATE TABLE IF NOT EXISTS settings(item TEXT, setting TEXT)')
sql.commit()
command = 'SELECT setting FROM settings WHERE item LIKE \'TVPART\''
cur.execute(command)
if not cur.fetchone():
    print ("Looks like you have never run the update DB script. I need some information to proceed.\n Enter the link to your metadata.\n Example: http://192.168.1.134:32400/library/metadata/\n")
    TVPART = str(input('Link:'))
    cur.execute('INSERT INTO settings VALUES(?, ?)', ("TVPART",TVPART.strip()))
    sql.commit()
    print (TVPART + " has been added to the settings table. Moving on.")
else:
    cur.execute(command)
    test2 = cur.fetchone()[0]
    TVPART = test2
command = 'SELECT setting FROM settings WHERE item LIKE \'TVGET\''
cur.execute(command)
if not cur.fetchone():
    print ("Enter the link to your TV show tree.\nExample: http://192.168.1.134:32400/library/sections/1/all/ \n")
    TVGET = str(input('Link:'))
    cur.execute('INSERT INTO settings VALUES(?, ?)', ("TVGET",TVGET.strip()))
    sql.commit()
    print (TVGET + " has been added to the settings table. Moving on.")
else:
    cur.execute(command)
    test1 = cur.fetchone()[0]
    TVGET = test1
command = 'SELECT setting FROM settings WHERE item LIKE \'MOVIEGET\''
cur.execute(command)
if not cur.fetchone():
    print ("Enter the link to your Movie tree.\nExample: http://192.168.1.134:32400/library/sections/2/all/ \n")
    MOVIEGET = str(input('Link:'))
    cur.execute('INSERT INTO settings VALUES(?, ?)', ("MOVIEGET",MOVIEGET.strip()))
    sql.commit()
    print (MOVIEGET + " has been added to the settings table. Moving on.")
else:
    cur.execute(command)
    test = cur.fetchone()[0]
    MOVIEGET = test
print ("Database update starting...\n")
# Content tables used by the updater routines.
cur.execute('CREATE TABLE IF NOT EXISTS shows(TShow TEXT, Episode TEXT, Season INT, Enum INT, Tnum INT, Summary TEXT, Link TEXT)')
sql.commit()
cur.execute('CREATE TABLE IF NOT EXISTS Movies(Movie TEXT, Summary TEXT, Rating TEXT, Tagline TEXT, Genre TEXT, Director TEXT, Actors TEXT)')
sql.commit()
cur.execute('CREATE TABLE IF NOT EXISTS TVshowlist(TShow TEXT, Summary TEXT, Genre TEXT, Rating TEXT, Duration INT, Totalnum INT)')
sql.commit()
def getshows():
    """
    Fetch the Plex TV-show tree (TVGET) and upsert one row per show into
    the sqlite TVshowlist table. Shows that fail to insert are reported
    and appended to the PROBLEMS file.
    """
    response = http.urlopen('GET', TVGET, preload_content=False).read()
    response = str(response)
    #print (response)
    # Crude XML 'parsing': one chunk per <Directory> element; chunk 0 is
    # the preamble before the first show, so iteration starts at 1.
    shows = response.split('<Directory ratingKey=')
    counter = 1
    while counter <= int(len(shows)-1):
        show = shows[counter]
        genres = show
        studio = show
        title = show
        # Title: text of the title="..." attribute, with HTML entities
        # replaced and characters that break paths/queries stripped.
        title = title.split('title="')
        title = title[1]
        title = title.split('"')
        title = title[0]
        title = title.replace(''','\'')
        title = title.replace('&','&')
        title = title.replace('?','')
        title = title.replace('/',' ')
        summary = show
        rating = show
        duration = show
        totalnum = show
        summary = show
        summary = summary.split('summary="')
        summary = summary[1]
        summary = summary.split('"')
        summary = summary[0]
        summary = summary.replace('\'','')
        rating = show
        # contentRating is optional in the Plex response.
        try:
            rating = rating.split('contentRating="')
            rating = rating[1]
            rating = rating.split('"')
            rating = rating[0]
        except Exception:
            rating = 'N\A'
        duration = show
        duration = duration.split('duration="')
        duration = duration[1]
        duration = duration.split('"')
        duration = duration[0]
        # Milliseconds -> minutes.
        duration = int(duration)/60000
        totalnum = show
        totalnum = totalnum.split(' leafCount="')
        totalnum = totalnum[1]
        totalnum = totalnum.split('"')
        totalnum = totalnum[0]
        name = title
        TShow = name
        # Up to three <Genre> tags are collected; 'none' placeholders are
        # removed from the final semicolon-separated list.
        genres = genres.split("<Genre tag=\"")
        try:
            genre = genres[1]
        except IndexError:
            genre = "none"
        try:
            genre2 = genres[2]
            genre2 = genre2.split('" />')
            genre2 = genre2[0]
            #print (genre2)
        except IndexError:
            genre2 = "none"
        try:
            genre3 = genres[3]
            genre3 = genre3.split('" />')
            genre3 = genre3[0]
            #print (genre2)
        except IndexError:
            genre3 = "none"
        genre = genre.split('" />')
        genre = genre[0] + ";" + genre2 + ";" + genre3 + ";"
        genre = genre.replace('none;','')
        #print (genre)
        studio = studio.split("studio=\"")
        try:
            studio = studio[1]
            studio = studio.split("\"")
            studio = studio[0]
        except IndexError:
            studio = "None"
        # Escape single quotes for the SQL text below.
        TShow = TShow.replace("'","''")
        summary = summary.replace("'","''")
        # NOTE(review): str.decode only exists on Python 2 byte strings;
        # under Python 3 this line raises AttributeError -- TODO confirm
        # the targeted interpreter version.
        summary = str(summary.decode('ascii','ignore')).strip()
        # Insert only when the show is not already present.
        cur.execute("SELECT * FROM TVshowlist WHERE TShow LIKE \"" + TShow + "\"")
        try:
            if not cur.fetchone():
                cur.execute("INSERT INTO TVshowlist VALUES(?, ?, ?, ?, ?, ?)", (TShow, summary, genre, rating, int(duration), int(totalnum)))
                sql.commit()
        except Exception:
            print ("Error adding " + TShow)
            with open(PROBLEMS, 'a') as file:
                file.write(TShow + "\n")
                file.close()
        counter = counter + 1
    print ("TV entries checked.")
def fixTVfiles():
    """
    Strip trailing whitespace/newlines from every playlist file in the
    Genre/TV directory and then in the Studio directory, rewriting each
    file in place.
    """
    # NOTE(review): the Windows branch relies on `homedir` ending with a
    # separator here, while the Studio branch below prepends one --
    # inconsistent; TODO confirm the expected `homedir` format.
    if "Windows" in ostype:
        PLdir = homedir + "Genre\\TV\\"
    else:
        PLdir = homedir + "/Genre/TV/"
    from os import listdir
    from os.path import isfile, join
    # Regular files only (skip subdirectories).
    showlist = [f for f in listdir(PLdir) if isfile(join(PLdir, f))]
    say = showlist
    for item in say:
        WorkingDir = PLdir + item
        with open(WorkingDir, 'r') as file:
            startfile = file.read()
            file.close()
        startfile = startfile.rstrip()
        with open(WorkingDir, 'w') as file:
            file.write(startfile)
            file.close()
    print ("Part 1 done. Moving on.")
    if "Windows" in ostype:
        PLdir = homedir + "\\Studio\\"
    else:
        PLdir = homedir + "/Studio/"
    showlist = [f for f in listdir(PLdir) if isfile(join(PLdir, f))]
    say = showlist
    for item in say:
        WorkingDir = PLdir + item
        with open(WorkingDir, 'r') as file:
            startfile = file.read()
            file.close()
        startfile = startfile.rstrip()
        with open(WorkingDir, 'w') as file:
            file.write(startfile)
            file.close()
    print ("TV Files Cleaned")
def getshow(show):
    """Scrape every TV show (and all of its episodes) from the Plex
    library listing at TVGET and insert missing rows into the ``shows``
    table; also appends each show to per-genre/per-studio playlist files.

    NOTE(review): the ``show`` parameter is overwritten on the first loop
    iteration and never used as input — looks like a leftover; confirm
    before relying on it.

    NOTE(review): several replace() literals below (e.g. replace(''',)
    appear to be HTML entities (&#39;, &amp;, &quot;) that a later tool
    decoded in place; as written they are not valid Python string
    literals — confirm against the original source file.

    Relies on module globals: http, homedir, ostype, TVGET, TVPART,
    FIXME, PROBLEMS, cur, sql.
    """
    response = http.urlopen('GET', TVGET, preload_content=False).read()
    response = str(response)
    #print (response)
    # Every "<Directory ratingKey=" fragment after the first is one show.
    shows = response.split('<Directory ratingKey=')
    counter = 1
    workingdir = homedir + "tvshowlist.txt"
    while counter <= int(len(shows)-1):
        show = shows[counter]
        genres = show
        studio = show
        title = show
        title = title.split('title="')
        title = title[1]
        title = title.split('"')
        title = title[0]
        title = title.replace(''','\'')
        title = title.replace('&','&')
        title = title.replace('?','')
        title = title.replace('/',' ')
        # Append the title to the master show list (create it if missing).
        try:
            with open(workingdir, 'a') as file:
                file.write(title)
                file.write("\n")
                file.close()
        except FileNotFoundError:
            with open(workingdir, 'w+') as file:
                file.write(title)
                file.write("\n")
                file.close()
        name = title
        TShow = name
        # Escape single quotes for the SQL built by string concatenation below.
        if (("'" in TShow) and ("''" not in TShow)):
            TShow = TShow.replace("'","''")
        print (TShow)
        title = title + '.txt.'
        title = homedir + title
        # Up to three genre tags are extracted; missing ones become "none".
        genres = genres.split("<Genre tag=\"")
        try:
            genre = genres[1]
        except IndexError:
            genre = "none"
        try:
            genre2 = genres[2]
            genre2 = genre2.split('" />')
            genre2 = genre2[0]
            #print (genre2)
        except IndexError:
            genre2 = "none"
        try:
            genre3 = genres[3]
            genre3 = genre3.split('" />')
            genre3 = genre3[0]
            #print (genre2)
        except IndexError:
            genre3 = "none"
        genre = genre.split('" />')
        genre = genre[0]
        #print (genre)
        # Write the show into one playlist file per genre (up to three).
        if (genre != "none"):
            if "Windows" in ostype:
                path = homedir + "\\Genre\\TV\\" + str(genre) + ".txt"
            else:
                path = homedir + "Genre/TV/" + str(genre) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                with open(path, 'w') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        if "none" != genre2:
            if "Windows" in ostype:
                path = homedir + "\\Genre\\TV\\" + str(genre2) + ".txt"
            else:
                path = homedir + "Genre/TV/" + str(genre2) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                print (genre2 + " created!")
                with open(path, 'w+') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        if "none" != genre3:
            if "Windows" in ostype:
                path = homedir + "\\Genre\\TV\\" + str(genre3) + ".txt"
            else:
                path = homedir + "Genre/TV/" + str(genre3) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                print (genre3 + " created!")
                with open(path, 'w+') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        # Record the show under its studio's playlist file as well.
        studio = studio.split("studio=\"")
        try:
            studio = studio[1]
            studio = studio.split("\"")
            studio = studio[0]
            path = homedir + "/Studio/" + str(studio) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                print ("Studio File Created")
                with open(path, 'w+') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        except IndexError:
            print ("No Studio Available. Skipping " + TShow)
        # Reduce `show` to the show's key, then fetch its full episode
        # list ("allLeaves") and parse each entry.
        show = show.split('" key')
        show = show[0]
        show = show.replace("\"", "")
        show = show.rstrip()
        episode = show
        link = TVPART + show + "/allLeaves"
        xresponse = http.urlopen('GET', link, preload_content=False).read()
        xresponse = str(xresponse)
        episodes = xresponse.split('type="episode" title="')
        #print (episodes)
        for episode in episodes:
            Season = episode
            Enum = episode
            Summary = episode
            Link = episode
            episode = episode.split('"')
            episode = episode[0]
            episode = episode + "\n"
            episode = episode.replace(''','\'')
            episode = episode.replace('&','&')
            Episode = episode.strip()
            # The first split fragment is the XML header, not an episode.
            if ("<?xml version=" in episode.strip()):
                #print ("Pass")
                Tnum = 0
            else:
                # Titles containing "(" are logged for manual fixing.
                if ("(" in episode):
                    xepisode = name + " " + episode
                    with open(FIXME, 'a') as file:
                        file.write(xepisode)
                        file.close()
                #episode = episode.rstrip()
                #print (episode)
                if episode != "Original":
                    # Tnum is a running per-show episode counter.
                    try:
                        Tnum = Tnum + 1
                    except Exception:
                        Tnum = 0
                    #print (Season)
                    Season = Season.split('parentIndex="')
                    #print (Season)
                    Season = Season[1]
                    Season = Season.split('"')
                    Season = Season[0]
                    Enum = Enum.split('index="')
                    Enum = Enum[1]
                    Enum = Enum.split('"')
                    Enum = Enum[0]
                    Summary = Summary.split('summary="')
                    Summary = Summary[1]
                    Summary = Summary.split('" index')
                    Summary = Summary[0]
                    Summary = Summary.replace(",", "")
                    Summary = Summary.replace('\xe2',"")
                    Summary = Summary.replace(""","")
                    try:
                        Summary = Summary.decode("ascii", "ignore")
                    except Exception:
                        pass
                    #Summary = remove_accents(Summary)
                    Link = Link.split('<Part id=')
                    Link = Link[1]
                    Link = Link.split('key="')
                    Link = Link[1]
                    Link = Link.split('" duration')
                    Link = Link[0]
                    TShow = str(TShow)
                    #print (TShow)
                    Episode = str(Episode)
                    #print (Episode)
                    Season = int(Season)
                    #print (str(Season))
                    Enum = int(Enum)
                    #print (str(Enum))
                    Tnum = int(Tnum)
                    #print (str(Tnum))
                    Summary = str(Summary.encode('ascii','ignore').strip())
                    #print (Summary)
                    Link = str(Link.strip().encode('ascii','replace'))
                    #print (Link)
                    # Insert the episode only if not already recorded.
                    try:
                        cur.execute('SELECT * FROM shows WHERE TShow LIKE \'' + TShow + '\' AND Tnum LIKE \'' + str(Tnum) + '\'')
                        if not cur.fetchone():
                            cur.execute('INSERT INTO shows VALUES(?, ?, ?, ?, ?, ?, ?)', (TShow, Episode, Season, Enum, Tnum, Summary, Link))
                            sql.commit()
                            print ("New Episode Found: " + TShow + " Episode: " + Episode)
                    except Exception:
                        print ("Error adding " + TShow)
                        with open(PROBLEMS, 'a') as file:
                            file.write(TShow + " " + Episode + "\n")
                            file.close()
        counter = counter + 1
    fixTVfiles()
    print ("TV entries checked.")
def gettvshows():
    """Scrape every TV show and its episodes from the Plex listing at
    TVGET into the ``shows`` table, and append each show to per-genre
    and per-studio playlist files.

    NOTE(review): this is a near-duplicate of getshow() (no Windows path
    branch for the genre files, extra quote-escaping for Episode/Summary);
    consider consolidating the two.

    NOTE(review): several replace() literals below (e.g. replace(''',)
    appear to be HTML entities (&#39;, &amp;, &quot;) decoded in place by
    a later tool; as written they are not valid Python string literals —
    confirm against the original source file.

    Relies on module globals: http, homedir, TVGET, TVPART, FIXME,
    PROBLEMS, cur, sql.
    """
    response = http.urlopen('GET', TVGET, preload_content=False).read()
    response = str(response)
    #print (response)
    # Every "<Directory ratingKey=" fragment after the first is one show.
    shows = response.split('<Directory ratingKey=')
    counter = 1
    workingdir = homedir + "tvshowlist.txt"
    while counter <= int(len(shows)-1):
        show = shows[counter]
        genres = show
        studio = show
        title = show
        title = title.split('title="')
        title = title[1]
        title = title.split('"')
        title = title[0]
        title = title.replace(''','\'')
        title = title.replace('&','&')
        title = title.replace('?','')
        title = title.replace('/',' ')
        # Append the title to the master show list (create it if missing).
        try:
            with open(workingdir, 'a') as file:
                file.write(title)
                file.write("\n")
                file.close()
        except FileNotFoundError:
            with open(workingdir, 'w+') as file:
                file.write(title)
                file.write("\n")
                file.close()
        name = title
        TShow = name
        # Escape single quotes for the SQL built by string concatenation below.
        if (("'" in TShow) and ("''" not in TShow)):
            TShow = TShow.replace("'","''")
        print (TShow)
        title = title + '.txt.'
        title = homedir + title
        # Up to three genre tags are extracted; missing ones become "none".
        genres = genres.split("<Genre tag=\"")
        try:
            genre = genres[1]
        except IndexError:
            genre = "none"
        try:
            genre2 = genres[2]
            genre2 = genre2.split('" />')
            genre2 = genre2[0]
            #print (genre2)
        except IndexError:
            genre2 = "none"
        try:
            genre3 = genres[3]
            genre3 = genre3.split('" />')
            genre3 = genre3[0]
            #print (genre2)
        except IndexError:
            genre3 = "none"
        genre = genre.split('" />')
        genre = genre[0]
        #print (genre)
        # Write the show into one playlist file per genre (up to three).
        if (genre != "none"):
            path = homedir + "Genre/TV/" + str(genre) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                with open(path, 'w') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        if "none" != genre2:
            path = homedir + "Genre/TV/" + str(genre2) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                print (genre2 + " created!")
                with open(path, 'w+') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        if "none" != genre3:
            path = homedir + "Genre/TV/" + str(genre3) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                print (genre3 + " created!")
                with open(path, 'w+') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        # Record the show under its studio's playlist file as well.
        studio = studio.split("studio=\"")
        try:
            studio = studio[1]
            studio = studio.split("\"")
            studio = studio[0]
            path = homedir + "/Studio/" + str(studio) + ".txt"
            try:
                with open(path, 'a') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
            except FileNotFoundError:
                print ("Studio File Created")
                with open(path, 'w+') as file:
                    file.write(TShow)
                    file.write("\n")
                    file.close()
        except IndexError:
            print ("No Studio Available. Skipping " + TShow)
        # Reduce `show` to the show's key, then fetch its full episode
        # list ("allLeaves") and parse each entry.
        show = show.split('" key')
        show = show[0]
        show = show.replace("\"", "")
        show = show.rstrip()
        episode = show
        link = TVPART + show + "/allLeaves"
        print (link)
        xresponse = http.urlopen('GET', link, preload_content=False).read()
        xresponse = str(xresponse)
        episodes = xresponse.split('type="episode" title="')
        #print (episodes)
        for episode in episodes:
            Season = episode
            Enum = episode
            Summary = episode
            Link = episode
            episode = episode.split('"')
            episode = episode[0]
            episode = episode + "\n"
            episode = episode.replace(''','\'')
            episode = episode.replace('&','&')
            Episode = episode.strip()
            # The first split fragment is the XML header, not an episode.
            if ("<?xml version=" in episode.strip()):
                #print ("Pass")
                Tnum = 0
            else:
                # Titles containing "(" are logged for manual fixing.
                if ("(" in episode):
                    xepisode = name + " " + episode
                    with open(FIXME, 'a') as file:
                        file.write(xepisode)
                        file.close()
                #episode = episode.rstrip()
                #print (episode)
                if episode != "Original":
                    #print ("Skipping")
                    #else:
                    #with open(title, "a") as file:
                    #file.write(episode)
                    #file.close()
                    # Tnum is a running per-show episode counter.
                    try:
                        Tnum = Tnum + 1
                    except Exception:
                        Tnum = 0
                    #print (Season)
                    Season = Season.split('parentIndex="')
                    #print (Season)
                    Season = Season[1]
                    Season = Season.split('"')
                    Season = Season[0]
                    Enum = Enum.split('index="')
                    Enum = Enum[1]
                    Enum = Enum.split('"')
                    Enum = Enum[0]
                    Summary = Summary.split('summary="')
                    Summary = Summary[1]
                    Summary = Summary.split('" index')
                    Summary = Summary[0]
                    Summary = Summary.replace(",", "")
                    Summary = Summary.replace('\xe2',"")
                    Summary = Summary.replace(""","")
                    try:
                        Summary = Summary.decode("ascii", "ignore")
                    except Exception:
                        pass
                    #Summary = remove_accents(Summary)
                    Link = Link.split('<Part id=')
                    Link = Link[1]
                    Link = Link.split('key="')
                    Link = Link[1]
                    Link = Link.split('" duration')
                    Link = Link[0]
                    TShow = str(TShow)
                    #TShow = TShow.replace("'","''")
                    #print (TShow)
                    Episode = str(Episode)
                    Episode = Episode.replace("'","''")
                    #print (Episode)
                    Season = int(Season)
                    #print (str(Season))
                    Enum = int(Enum)
                    #print (str(Enum))
                    Tnum = int(Tnum)
                    #print (str(Tnum))
                    Summary = str(Summary.encode('ascii','ignore').strip())
                    Summary = Summary.replace("'","''")
                    #print (Summary)
                    Link = str(Link.strip().encode('ascii','replace'))
                    #print (Link)
                    # Insert the episode only if not already recorded.
                    cur.execute("SELECT * FROM shows WHERE TShow LIKE \"" + TShow + "\" AND Tnum LIKE \"" + str(Tnum) + "\"")
                    try:
                        if not cur.fetchone():
                            cur.execute("INSERT INTO shows VALUES(?, ?, ?, ?, ?, ?, ?)", (TShow, Episode, Season, Enum, Tnum, Summary, Link))
                            sql.commit()
                            print ("New Episode Found: " + TShow + " Episode: " + Episode)
                    except Exception:
                        print ("Error adding " + TShow)
                        with open(PROBLEMS, 'a') as file:
                            file.write(TShow + " " + Episode + "\n")
                            file.close()
        counter = counter + 1
    fixTVfiles()
    print ("TV entries checked.")
def fixmvfiles():
    """Strip trailing whitespace/newlines from the generated movie list file."""
    listpath = homedir + "movielist.txt"
    with open(listpath, 'r') as fh:
        contents = fh.read()
    with open(listpath, 'w') as fh:
        fh.write(contents.rstrip())
    print ("Movie File Cleaned")
#mark
def getmovies():
    """Scrape every movie from the Plex listing at MOVIEGET and insert
    missing rows into the ``Movies`` table (title, summary, rating,
    tagline, genres, directors, actors); also appends each title to
    the master movie list file.

    NOTE(review): several replace() literals below (e.g. replace(''',)
    appear to be HTML entities (&#39;, &amp;, &quot;) decoded in place by
    a later tool; as written they are not valid Python string literals —
    confirm against the original source file.

    Relies on module globals: http, homedir, MOVIEGET, PROBLEMS, cur, sql.
    """
    response = http.urlopen('GET', MOVIEGET, preload_content=False).read()
    response = str(response)
    #print (response)
    # Every "<Video ratingKey=" fragment after the first is one movie.
    shows = response.split('<Video ratingKey=')
    counter = 1
    Moviedir = homedir + "movielist.txt"
    while counter <= int(len(shows)-1):
        show = shows[counter]
        genres = show
        studio = show
        director = show
        actors = show
        rating = show
        summary = show
        tagline = show
        title = show
        title = title.split('title="')
        title = title[1]
        title = title.split('"')
        title = title[0]
        title = title.replace(''','\'')
        title = title.replace('&','&')
        #title = title.replace('?','')
        #title = title.replace('/',' ')
        # Append the title to the master movie list (create it if missing).
        try:
            with open(Moviedir, 'a') as file:
                file.write(title)
                file.write("\n")
                file.close()
        except FileNotFoundError:
            with open(Moviedir, 'w+') as file:
                file.write(title)
                file.write("\n")
                file.close()
        name = title
        # Up to three genre tags are extracted; missing ones become "none".
        genres = genres.split("<Genre tag=\"")
        try:
            genre = genres[1]
        except IndexError:
            genre = "none"
        try:
            genre2 = genres[2]
            genre2 = genre2.split('" />')
            genre2 = genre2[0]
            #print (genre2)
        except IndexError:
            genre2 = "none"
        try:
            genre3 = genres[3]
            genre3 = genre3.split('" />')
            genre3 = genre3[0]
            #print (genre2)
        except IndexError:
            genre3 = "none"
        genre = genre.split('" />')
        genre = genre[0]
        bgenre = genre + " " + genre2 + " " + genre3
        #print (genre)
        # Up to three directors, same pattern as genres.
        directors = director.split("<Director tag=\"")
        try:
            director = directors[1]
        except IndexError:
            director = "none"
        try:
            director2 = directors[2]
            director2 = director2.split('" />')
            director2 = director2[0]
        except IndexError:
            director2 = "none"
        try:
            director3 = directors[3]
            director3 = director3.split('" />')
            director3 = director3[0]
        except IndexError:
            director3 = "none"
        director = director.split('" />')
        director = director[0]
        directors = director + " " + director2 + " " + director3
        directors = directors.replace("none", "")
        #print (directors)
        # Up to three cast members, same pattern.
        actorss = actors.split("<Role tag=\"")
        try:
            actors = actorss[1]
        except IndexError:
            actors = "none"
        try:
            actors2 = actorss[2]
            actors2 = actors2.split('" />')
            actors2 = actors2[0]
        except IndexError:
            actors2 = "none"
        try:
            actors3 = actorss[3]
            actors3 = actors3.split('" />')
            actors3 = actors3[0]
        except IndexError:
            actors3 = "none"
        actors = actors.split('" />')
        actors = actors[0]
        bactors = actors + " " + actors2 + " " + actors3
        bactors = bactors.replace("none", "")
        rating = rating.split("contentRating=\"")
        try:
            rating = rating[1]
            rating = rating.split("\"")
            rating = rating[0]
        except IndexError:
            print ("No Rating Available. Skipping " + name)
            rating = "none"
        tagline = tagline.split("tagline=\"")
        try:
            tagline = tagline[1]
            tagline = tagline.split("\" ")
            tagline = tagline[0]
        except IndexError:
            print ("No Tagline Available. Skipping " + name)
            tagline = "none"
        summary = summary.split("summary=\"")
        try:
            summary = summary[1]
            summary = summary.split("\"")
            summary = summary[0]
        except IndexError:
            print ("No Summary Available. Skipping " + name)
            summary = "none"
        #marker
        # Normalize/sanitize the text fields before the DB insert.
        summary = summary.replace(''','\'')
        summary = summary.replace('&','&')
        summary = summary.replace(',', ' ')
        summary = summary.replace('\'','')
        try:
            summary = summary.decode("ascii", "ignore")
        except Exception:
            pass
        name = name.replace(''','\'')
        name = name.replace('&','&')
        name = name.replace(',', ' ')
        name = name.replace("'","''")
        #print (tagline)
        #tagline = tagline.replace(''','\'')
        tagline = tagline.replace(''','')
        tagline = tagline.replace('&','&')
        tagline = tagline.replace(',', ' ')
        tagline = tagline.replace('\'','')
        try:
            tagline = tagline.decode("ascii", "ignore")
        except Exception:
            pass
        #directors = directors.replace(''','\'')
        directors = directors.replace(''','')
        directors = directors.replace('&','&')
        directors = directors.replace(',', ' ')
        directors = directors.replace('\'','')
        try:
            directors = directors.decode("ascii", "ignore")
        except Exception:
            pass
        #bgenre = bgenre.replace(''','\'')
        bgenre = bgenre.replace(''','')
        bgenre = bgenre.replace('&','&')
        bgenre = bgenre.replace(',', ' ')
        bgenre = bgenre.replace("none", "")
        #bactors = bactors.replace(''','\'')
        bactors = bactors.replace(''','')
        bactors = bactors.replace('\'','')
        bactors = bactors.replace('&','&')
        bactors = bactors.replace(',', ' ')
        try:
            bactors = bactors.decode("ascii", "ignore")
        except Exception:
            pass
        #print (name + " " + summary + " " + rating + " " + tagline + " " + bgenre + " " + directors + " " + bactors)
        # Insert the movie only if not already recorded.
        try:
            cur.execute('SELECT * FROM Movies WHERE Movie LIKE \'' + name + '\'')
            if not cur.fetchone():
                cur.execute('INSERT INTO Movies VALUES(?, ?, ?, ?, ?, ?, ?)', (name, summary, rating, tagline, bgenre, directors, bactors))
                sql.commit()
                print ("New movie found and added to the DB.")
        except Exception:
            print ("Error adding " + name)
            with open(PROBLEMS, 'a') as file:
                file.write(name.decode("ascii", "ignore") + " " + bactors + "\n")
                file.close()
        counter = counter + 1
    fixmvfiles()
def getcommercials():
    """Rebuild the ``commercials`` table from the Plex section whose URL is
    stored in the settings table under ``COMPART`` (prompting for it on
    first run).

    Uses parameterized SQL throughout: clip titles may contain quotes,
    which previously broke (and could inject into) the concatenated
    SELECT statements.
    """
    cur.execute("CREATE TABLE IF NOT EXISTS commercials(name TEXT, duration INT)")
    sql.commit()
    # Look up (or interactively create) the COMPART setting.
    cur.execute("SELECT setting FROM settings WHERE item LIKE ?", ("COMPART",))
    row = cur.fetchone()
    if row is None:
        print ("You need to supply the link to find your commercials.\nExample: http://192.168.1.134:32400/library/metadata/\n")
        COMPART = str(input('Link:'))
        cur.execute("INSERT INTO settings VALUES(?,?)", ("COMPART", COMPART.strip()))
        sql.commit()
        print (COMPART + " has been added to the settings table. Moving on.")
    else:
        COMPART = row[0]
    # Rebuild from scratch on every run.
    cur.execute("DELETE FROM commercials")
    sql.commit()
    response = str(http.urlopen('GET', COMPART, preload_content=False).read())
    # Each "<Video ratingKey=" fragment after the first describes one clip.
    for chunk in response.split("<Video ratingKey=")[1:]:
        comc = chunk.split("title=\"")[1].split("\"")[0].strip()
        duration = chunk.split("duration=\"")[1].split("\"")[0].strip()
        # Plex reports milliseconds; store seconds.
        duration = int(duration)/1000
        cur.execute("SELECT * FROM commercials WHERE name LIKE ?", (comc,))
        if not cur.fetchone():
            cur.execute("INSERT INTO commercials VALUES (?,?)", (comc, duration))
            sql.commit()
            print ("New Commercial Found: " + comc)
    print ("Done")
def getprerolls():
    """Rebuild the ``prerolls`` table from the Plex section whose URL is
    stored in the settings table under ``PREROLLPART`` (prompting for it
    on first run).

    Uses parameterized SQL throughout: clip titles may contain quotes,
    which previously broke (and could inject into) the concatenated
    SELECT statements.
    """
    cur.execute("CREATE TABLE IF NOT EXISTS prerolls(name TEXT, duration INT)")
    sql.commit()
    # Look up (or interactively create) the PREROLLPART setting.
    cur.execute("SELECT setting FROM settings WHERE item LIKE ?", ("PREROLLPART",))
    row = cur.fetchone()
    if row is None:
        print ("You need to supply the link to find your prerolls.\nExample: http://192.168.1.134:32400/library/metadata/\n")
        PREROLLPART = str(input('Link:'))
        cur.execute("INSERT INTO settings VALUES(?,?)", ("PREROLLPART", PREROLLPART.strip()))
        sql.commit()
        print (PREROLLPART + " has been added to the settings table. Moving on.")
    else:
        PREROLLPART = row[0]
    # Rebuild from scratch on every run.
    cur.execute("DELETE FROM prerolls")
    sql.commit()
    response = str(http.urlopen('GET', PREROLLPART, preload_content=False).read())
    # Each "<Video ratingKey=" fragment after the first describes one clip.
    for chunk in response.split("<Video ratingKey=")[1:]:
        comc = chunk.split("title=\"")[1].split("\"")[0].strip()
        print (comc)
        duration = chunk.split("duration=\"")[1].split("\"")[0].strip()
        # Plex reports milliseconds; store seconds.
        duration = int(duration)/1000
        cur.execute("SELECT * FROM prerolls WHERE name LIKE ?", (comc,))
        if not cur.fetchone():
            cur.execute("INSERT INTO prerolls VALUES (?,?)", (comc, duration))
            sql.commit()
            print ("New preroll Found: " + comc)
    print ("Done")
def startupactiontv():
    """Empty the TV tables (``TVshowlist`` and ``shows``) ahead of a rescan."""
    for table in ("TVshowlist", "shows"):
        cur.execute("DELETE FROM " + table)
        sql.commit()
    print ("TV Tables purged and ready for data.")
def startupactionmovie():
    """Empty the ``Movies`` table ahead of a movie rescan."""
    cur.execute("DELETE FROM Movies")
    sql.commit()
    # Bug fix: this previously printed "TV Tables purged" even though it
    # clears the movie table.
    print ("Movie Tables purged and ready for data.")
def getfavorites():
    """Collect titles tagged with the "favorite" genre into the module-level
    ``favmve`` and ``favtv`` lists so they survive a database rebuild.
    """
    global favtv
    global favmve
    cur.execute("SELECT Movie FROM Movies WHERE Genre LIKE \"%favorite%\"")
    for row in cur.fetchall():
        movie = row[0]
        if movie.strip() not in favmve:
            favmve.append(movie)
    cur.execute("SELECT TShow FROM TVshowlist WHERE Genre LIKE \"%favorite%\"")
    for row in cur.fetchall():
        tv_show = row[0]
        if tv_show not in favtv:
            favtv.append(tv_show)
    print ("Favorites Acquired. Moving On.")
def restorefavorites():
    """Re-tag the remembered favorite shows/movies after a rebuild by
    invoking the companion ``system.py`` CLI for each saved title.

    Uses ``subprocess.run`` with an argument list (no shell) so titles
    containing quotes or shell metacharacters cannot break — or inject
    into — the command line, unlike the previous ``os.system`` string.
    """
    global favtv
    global favmve
    import subprocess
    for show in favtv:
        subprocess.run(["python", "./system.py", "addfavoriteshow", show.strip()])
    for movie in favmve:
        subprocess.run(["python", "./system.py", "addfavoritemovie", movie.strip()])
    print ("Favorites Restored.")
def getgenrestv():
    """Snapshot the TV genre -> shows mapping into the module-level
    ``tvgenres`` list (entries look like ``"Genre:Show1,Show2"``) so it
    can be replayed after the tables are rebuilt.

    Fixes two defects of the original: a genre that matched no rows
    crashed with a NameError (``writeme`` was never bound before the
    append/del), and genres containing quotes broke the concatenated
    LIKE query (now parameterized).
    """
    global tvgenres
    cur.execute("SELECT Genre FROM TVshowlist ORDER BY Genre ASC")
    # Collect the distinct genre tokens; rows store them ';'-separated.
    distinct_genres = []
    for row in cur.fetchall():
        for token in row[0].split(";"):
            if token not in distinct_genres:
                distinct_genres.append(token)
    for genre in distinct_genres:
        genre = genre.strip()
        cur.execute("SELECT TShow from TVshowlist where Genre LIKE ?",
                    ("%" + genre + "%",))
        shows = [row[0] for row in cur.fetchall()]
        if not shows:
            continue
        tvgenres.append(genre + ":" + ",".join(shows))
    print ("TV Genres Saved.")
def getgenresmovie():
    """Snapshot the movie genre -> movies mapping into the module-level
    ``moviegenres`` list (entries look like ``"Genre:Movie1,Movie2"``)
    so it can be replayed after the table is rebuilt.

    Fixes two defects of the original: a genre that matched no rows
    crashed with a NameError (``writeme`` was never bound before the
    append/del), and genres containing quotes broke the concatenated
    LIKE query (now parameterized).
    """
    global moviegenres
    cur.execute("SELECT Genre FROM Movies ORDER BY Genre ASC")
    # Collect the distinct genre tokens; movie genres are space-separated.
    distinct_genres = []
    for row in cur.fetchall():
        for token in row[0].split(" "):
            if token and token not in distinct_genres:
                distinct_genres.append(token)
    for genre in distinct_genres:
        genre = genre.strip()
        if not genre:
            continue
        cur.execute("SELECT Movie FROM Movies WHERE Genre LIKE ?",
                    ("%" + genre + "%",))
        movies = [row[0] for row in cur.fetchall()]
        if not movies:
            continue
        moviegenres.append(genre + ":" + ",".join(movies))
    print ("Movie Genres Saved.")
def restoregenrestv():
    """Replay the saved ``tvgenres`` snapshot, re-associating each genre
    with every one of its shows via addgenreshow().
    """
    global tvgenres
    for entry in tvgenres:
        parts = entry.split(":")
        tag = parts[0]
        for show_name in parts[1].split(","):
            say = addgenreshow(show_name.strip(), tag)
            #print (say)
    print ("TV Genres Restored.")
def restoregenremovies():
    """Replay the saved ``moviegenres`` snapshot, re-associating each genre
    with every one of its movies via addgenremovie().
    """
    global moviegenres
    for entry in moviegenres:
        # Skip the placeholder entry, mirroring the original guard.
        if entry == " ":
            continue
        parts = entry.split(":")
        tag = parts[0]
        for movie_name in parts[1].split(","):
            say = addgenremovie(movie_name.strip(), tag)
            #print (say)
    print ("Movie Genres Restored.")
def addgenreshow(show, genre):
    """Associate *genre* with the TV show *show* in ``TVshowlist``.

    Returns a human-readable status string (callers print or ignore it).
    The row is rewritten via delete + insert because the table has no
    primary key. Queries are parameterized: titles containing quotes
    previously broke — and could inject into — the concatenated SQL.
    """
    cur.execute("SELECT * FROM TVshowlist WHERE TShow LIKE ?", (show,))
    stuff = cur.fetchone()
    if stuff is None:
        return ("Error: " + show + " not found. Check title and try again.")
    TShow = stuff[0]
    summary = stuff[1]
    # Treat a missing/NULL genre column as an empty genre string.
    genres = stuff[2] if stuff[2] is not None else ""
    rating = stuff[3]
    duration = stuff[4]
    totalnum = stuff[5]
    if genre.lower() in genres.lower():
        return("Error: " + genre + " is already associated with the show " + show)
    genres = genres + " " + genre
    cur.execute("DELETE FROM TVshowlist WHERE TShow LIKE ?", (show,))
    sql.commit()
    cur.execute('INSERT INTO TVshowlist VALUES(?,?,?,?,?,?)',(TShow, summary, genres, rating, int(duration), int(totalnum)))
    sql.commit()
    return (genre + " has been associated with " + show)
def addgenremovie(movie, genre):
    """Associate *genre* with *movie* in the ``Movies`` table.

    Returns a human-readable status string (callers print or ignore it).
    The row is rewritten via delete + insert because the table has no
    primary key. Queries are parameterized: titles containing quotes
    previously broke — and could inject into — the concatenated SQL.
    """
    cur.execute("SELECT * FROM Movies WHERE Movie LIKE ?", (movie,))
    stuff = cur.fetchone()
    if stuff is None:
        return ("Error restoring genre " + genre + " to movie " + movie)
    title = stuff[0]
    summary = stuff[1]
    rating = stuff[2]
    tagline = stuff[3]
    genres = stuff[4]
    director = stuff[5]
    actor = stuff[6]
    if genre.lower() in genres.lower():
        return("Error: " + genre + " is already associated with the movie " + movie)
    genres = genres.strip() + " " + genre
    cur.execute("DELETE FROM Movies WHERE Movie LIKE ?", (movie,))
    sql.commit()
    cur.execute('INSERT INTO Movies VALUES(?,?,?,?,?,?,?)',(title, summary, rating, tagline, genres, director, actor))
    sql.commit()
    return (genre + " successfully associated with the movie " + movie )
# Command dispatch: the first CLI argument selects which update to run.
try:
    if "Windows" not in ostype:
        option = str(sys.argv[1])
    else:
        print ("Notice: For Windows, the update db script may default to 'all' when there is an argument failure.\n")
        option = "all"
    # NOTE(review): getshows() is not defined in this section of the file
    # (only getshow() is); confirm it is defined earlier in the file.
    if ("updatetv" in option):
        getgenrestv()
        startupactiontv()
        gettvshows()
        getshows()
        restoregenrestv()
    elif ("updatemovies" in option):
        getgenresmovie()
        startupactionmovie()
        getmovies()
        restoregenremovies()
    elif ("all" in option):
        getgenrestv()
        getgenresmovie()
        startupactiontv()
        startupactionmovie()
        gettvshows()
        getshows()
        getmovies()
        restoregenrestv()
        restoregenremovies()
    elif ("getcommercials" in option):
        getcommercials()
        print ("Commercial Get Finished.")
    elif ("getprerolls" in option):
        getprerolls()
        print ("Preroll Get Finished.")
except (IndexError, TypeError):
    # Bug fix: a missing CLI argument raises IndexError from sys.argv[1],
    # which the old `except TypeError` never caught, so the friendly
    # usage message below was unreachable. TypeError is kept for safety.
    print ("No option specified. Use 'updatetv' or 'updatemovies' or 'all' to update your db.")
print ("Done")
| 29,887 | 0 | 414 |
45ad8a2491616a12823e7c286b15c945f03ec986 | 1,541 | py | Python | chat/views.py | chyuting/dj4e-samples | bbe3dcd214789e4be96dd2460018524f8078b4e3 | [
"MIT"
] | 1 | 2021-01-29T15:00:43.000Z | 2021-01-29T15:00:43.000Z | chat/views.py | Lucasmiguelmac/dj4e-samples | a636ce0d49e57bebd098b271ff9356555e468577 | [
"MIT"
] | null | null | null | chat/views.py | Lucasmiguelmac/dj4e-samples | a636ce0d49e57bebd098b271ff9356555e468577 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect, reverse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.http import JsonResponse, HttpResponse
from chat.models import Message
from datetime import datetime, timedelta
import time
# References
# https://simpleisbetterthancomplex.com/tutorial/2016/07/27/how-to-return-json-encoded-response.html
| 32.104167 | 100 | 0.699546 | from django.shortcuts import render, redirect, reverse
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.humanize.templatetags.humanize import naturaltime
from django.http import JsonResponse, HttpResponse
from chat.models import Message
from datetime import datetime, timedelta
import time
class HomeView(View):
    """Serve the static chat landing page."""
    def get(self, request):
        template = 'chat/main.html'
        return render(request, template)
def jsonfun(request):
    """Demo JSON endpoint: pause two seconds, then return a small payload."""
    time.sleep(2)  # artificial latency so the client-side spinner is visible
    payload = {'first': 'first thing', 'second': 'second thing'}
    return JsonResponse(payload)
class TalkMain(LoginRequiredMixin, View):
    """Chat page: GET renders the talk UI after pruning stale messages;
    POST stores a new message owned by the signed-in user."""
    def get(self, request):
        # Drop messages older than an hour before rendering the page.
        cutoff = datetime.now() - timedelta(minutes=60)
        Message.objects.filter(created_at__lt=cutoff).delete()
        return render(request, 'chat/talk.html')

    def post(self, request):
        Message(text=request.POST['message'], owner=request.user).save()
        return redirect(reverse('chat:talk'))
class TalkMessages(LoginRequiredMixin, View):
    """Return the ten newest messages as JSON: [[text, humanized age], ...]."""
    def get(self, request):
        recent = Message.objects.all().order_by('-created_at')[:10]
        payload = [[msg.text, naturaltime(msg.created_at)] for msg in recent]
        return JsonResponse(payload, safe=False)
# References
# https://simpleisbetterthancomplex.com/tutorial/2016/07/27/how-to-return-json-encoded-response.html
| 831 | 47 | 197 |
12ac692d0a10589809a84612868ff0ec0d7b7906 | 7,605 | py | Python | src/sentry/web/frontend/accounts.py | mattrobenolt/sentry | 3468714db277391de6cbcfda20842e6b460eecb3 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/accounts.py | mattrobenolt/sentry | 3468714db277391de6cbcfda20842e6b460eecb3 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/accounts.py | mattrobenolt/sentry | 3468714db277391de6cbcfda20842e6b460eecb3 | [
"BSD-3-Clause"
] | null | null | null | """
sentry.web.frontend.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import itertools
from django.contrib import messages
from django.contrib.auth import login as login_user, authenticate
from django.core.context_processors import csrf
from django.db import transaction
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.utils import timezone
from sudo.decorators import sudo_required
from sentry.models import (
LostPasswordHash, Project, UserOption
)
from sentry.plugins import plugins
from sentry.web.decorators import login_required
from sentry.web.forms.accounts import (
AccountSettingsForm, NotificationSettingsForm, AppearanceSettingsForm,
RecoverPasswordForm, ChangePasswordRecoverForm,
ProjectEmailOptionsForm)
from sentry.web.helpers import render_to_response
from sentry.utils.auth import get_auth_providers, get_login_redirect
from sentry.utils.safe import safe_execute
@login_required
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
@csrf_protect
@never_cache
@login_required
| 31.820084 | 101 | 0.673899 | """
sentry.web.frontend.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import itertools
from django.contrib import messages
from django.contrib.auth import login as login_user, authenticate
from django.core.context_processors import csrf
from django.db import transaction
from django.http import HttpResponseRedirect
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.utils import timezone
from sudo.decorators import sudo_required
from sentry.models import (
LostPasswordHash, Project, UserOption
)
from sentry.plugins import plugins
from sentry.web.decorators import login_required
from sentry.web.forms.accounts import (
AccountSettingsForm, NotificationSettingsForm, AppearanceSettingsForm,
RecoverPasswordForm, ChangePasswordRecoverForm,
ProjectEmailOptionsForm)
from sentry.web.helpers import render_to_response
from sentry.utils.auth import get_auth_providers, get_login_redirect
from sentry.utils.safe import safe_execute
@login_required
def login_redirect(request):
    """Bounce an authenticated user to their post-login destination."""
    return HttpResponseRedirect(get_login_redirect(request))
def recover(request):
    """Start the lost-password flow: mail the user a recovery link.

    A CAPTCHA is required on the second and later attempts, tracked via
    the ``needs_captcha`` session flag.
    """
    form = RecoverPasswordForm(request.POST or None,
                               captcha=bool(request.session.get('needs_captcha')))
    if form.is_valid():
        password_hash, created = LostPasswordHash.objects.get_or_create(
            user=form.cleaned_data['user']
        )
        # Refresh an expired hash's timestamp and token before mailing it.
        if not password_hash.is_valid():
            password_hash.date_added = timezone.now()
            password_hash.set_hash()
            password_hash.save()
        password_hash.send_recover_mail()
        # Successful submission resets the captcha requirement.
        request.session.pop('needs_captcha', None)
        return render_to_response('sentry/account/recover/sent.html', {
            'email': password_hash.user.email,
        }, request)
    elif request.POST and not request.session.get('needs_captcha'):
        # First failed POST: demand a captcha next time, but rebuild the
        # form without a captcha validation error for this attempt.
        request.session['needs_captcha'] = 1
        form = RecoverPasswordForm(request.POST or None, captcha=True)
        form.errors.pop('captcha', None)
    context = {
        'form': form,
    }
    return render_to_response('sentry/account/recover/index.html', context, request)
def recover_confirm(request, user_id, hash):
    """Second step of password recovery: validate the emailed hash, then
    let the user choose a new password and log them in.

    An expired hash is deleted and treated the same as a missing one
    (renders the failure template).
    """
    try:
        password_hash = LostPasswordHash.objects.get(user=user_id, hash=hash)
        if not password_hash.is_valid():
            password_hash.delete()
            raise LostPasswordHash.DoesNotExist
        user = password_hash.user
    except LostPasswordHash.DoesNotExist:
        context = {}
        tpl = 'sentry/account/recover/failure.html'
    else:
        tpl = 'sentry/account/recover/confirm.html'
        if request.method == 'POST':
            form = ChangePasswordRecoverForm(request.POST)
            if form.is_valid():
                user.set_password(form.cleaned_data['password'])
                user.save()
                # Ugly way of doing this, but Django requires the backend be set
                user = authenticate(
                    username=user.username,
                    password=form.cleaned_data['password'],
                )
                login_user(request, user)
                # The hash is single-use: remove it once consumed.
                password_hash.delete()
                return login_redirect(request)
        else:
            form = ChangePasswordRecoverForm()
        context = {
            'form': form,
        }
    return render_to_response(tpl, context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def settings(request):
    """Account settings page: display and process the basic profile form."""
    initial = {
        'email': request.user.email,
        'username': request.user.username,
        'name': request.user.name,
    }
    form = AccountSettingsForm(request.user, request.POST or None, initial=initial)
    if form.is_valid():
        form.save()
        messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)

    context = csrf(request)
    context['form'] = form
    context['page'] = 'settings'
    context['AUTH_PROVIDERS'] = get_auth_providers()
    return render_to_response('sentry/account/settings.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def appearance_settings(request):
    """Appearance preferences page (language, timezone, clock format,
    stacktrace order)."""
    from django.conf import settings

    saved = UserOption.objects.get_all_values(user=request.user, project=None)
    initial = {
        'language': saved.get('language') or request.LANGUAGE_CODE,
        'stacktrace_order': int(saved.get('stacktrace_order', -1) or -1),
        'timezone': saved.get('timezone') or settings.SENTRY_DEFAULT_TIME_ZONE,
        'clock_24_hours': saved.get('clock_24_hours') or False,
    }
    form = AppearanceSettingsForm(request.user, request.POST or None, initial=initial)
    if form.is_valid():
        form.save()
        messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
        return HttpResponseRedirect(request.path)

    context = csrf(request)
    context['form'] = form
    context['page'] = 'appearance'
    context['AUTH_PROVIDERS'] = get_auth_providers()
    return render_to_response('sentry/account/appearance.html', context, request)
@csrf_protect
@never_cache
@login_required
@sudo_required
@transaction.atomic
def notification_settings(request):
settings_form = NotificationSettingsForm(request.user, request.POST or None)
project_list = list(Project.objects.filter(
team__organizationmemberteam__organizationmember__user=request.user,
team__organizationmemberteam__is_active=True,
).distinct())
project_forms = [
(project, ProjectEmailOptionsForm(
project, request.user,
request.POST or None,
prefix='project-%s' % (project.id,)
))
for project in sorted(project_list, key=lambda x: (
x.team.name if x.team else None, x.name))
]
ext_forms = []
for plugin in plugins.all():
for form in safe_execute(plugin.get_notification_forms) or ():
form = safe_execute(form, plugin, request.user, request.POST or None, prefix=plugin.slug)
if not form:
continue
ext_forms.append(form)
if request.POST:
all_forms = list(itertools.chain(
[settings_form], ext_forms, (f for _, f in project_forms)
))
if all(f.is_valid() for f in all_forms):
for form in all_forms:
form.save()
messages.add_message(request, messages.SUCCESS, 'Your settings were saved.')
return HttpResponseRedirect(request.path)
context = csrf(request)
context.update({
'settings_form': settings_form,
'project_forms': project_forms,
'ext_forms': ext_forms,
'page': 'notifications',
'AUTH_PROVIDERS': get_auth_providers(),
})
return render_to_response('sentry/account/notifications.html', context, request)
@csrf_protect
@never_cache
@login_required
def list_identities(request):
from social_auth.models import UserSocialAuth
identity_list = list(UserSocialAuth.objects.filter(user=request.user))
AUTH_PROVIDERS = get_auth_providers()
context = csrf(request)
context.update({
'identity_list': identity_list,
'page': 'identities',
'AUTH_PROVIDERS': AUTH_PROVIDERS,
})
return render_to_response('sentry/account/identities.html', context, request)
| 5,975 | 0 | 156 |
0ac7bc4c005624506cc0f66028b041697831c329 | 8,696 | py | Python | fonts/OpenTypeMath/Asana-Math/config.py | Klortho/MathJax-dev | e9aa047a806b645b6a87ef1ca1017775d57a7761 | [
"Apache-2.0"
] | 1 | 2019-12-13T14:54:24.000Z | 2019-12-13T14:54:24.000Z | fonts/OpenTypeMath/Asana-Math/config.py | Klortho/MathJax-dev | e9aa047a806b645b6a87ef1ca1017775d57a7761 | [
"Apache-2.0"
] | null | null | null | fonts/OpenTypeMath/Asana-Math/config.py | Klortho/MathJax-dev | e9aa047a806b645b6a87ef1ca1017775d57a7761 | [
"Apache-2.0"
] | null | null | null | # -*- Mode: Python; tab-width: 2; indent-tabs-mode:nil; -*-
# vim: set ts=2 et sw=2 tw=80:
#
# Copyright (c) 2013 MathJax Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FONTFAMILY_PREFIX = "Asana MathJax"
FONTNAME_PREFIX = "AsanaMathJax"
MATHFONT = "Asana-Math.otf"
MAINFONTS = None
FONTSPLITTING_EXTRA = {
"Variants": [
("zero.onum", 0xE200), # old style numbers
("one.onum", 0xE201),
("two.onum", 0xE202),
("three.onum", 0xE203),
("four.onum", 0xE204),
("five.onum", 0xE205),
("six.onum", 0xE206),
("seven.onum", 0xE207),
("eight.onum", 0xE208),
("nine.onum", 0xE209),
("u1D49C.salt", 0xE20A), # script salt (used as caligraphic)
("uni212C.salt", 0xE20B),
("u1D49E.salt", 0xE20C),
("u1D49F.salt", 0xE20D),
("uni2130.salt", 0xE20E),
("uni2131.salt", 0xE20F),
("u1D4A2.salt", 0xE210),
("uni210B.salt", 0xE211),
("uni2110.salt", 0xE212),
("u1D4A5.salt", 0xE213),
("u1D4A6.salt", 0xE214),
("uni2112.salt", 0xE215),
("uni2133.salt", 0xE216),
("u1D4A9.salt", 0xE217),
("u1D4AA.salt", 0xE218),
("u1D4AB.salt", 0xE219),
("u1D4AC.salt", 0xE21A),
("uni211B.salt", 0xE21B),
("u1D4AE.salt", 0xE21C),
("u1D4AF.salt", 0xE21D),
("u1D4B0.salt", 0xE21E),
("u1D4B1.salt", 0xE21F),
("u1D4B2.salt", 0xE220),
("u1D4B3.salt", 0xE221),
("u1D4B4.salt", 0xE222),
("u1D4B5.salt", 0xE223),
("u1D4D0.salt", 0xE224), # bold script salt (used as bold caligraphic)
("u1D4D1.salt", 0xE225),
("u1D4D2.salt", 0xE226),
("u1D4D3.salt", 0xE227),
("u1D4D4.salt", 0xE228),
("u1D4D5.salt", 0xE229),
("u1D4D6.salt", 0xE22A),
("u1D4D7.salt", 0xE22B),
("u1D4D8.salt", 0xE22C),
("u1D4D9.salt", 0xE22D),
("u1D4DA.salt", 0xE22E),
("u1D4DB.salt", 0xE22F),
("u1D4DC.salt", 0xE230),
("u1D4DD.salt", 0xE231),
("u1D4DE.salt", 0xE232),
("u1D4DF.salt", 0xE233),
("u1D4E0.salt", 0xE234),
("u1D4E1.salt", 0xE235),
("u1D4E2.salt", 0xE236),
("u1D4E3.salt", 0xE237),
("u1D4E4.salt", 0xE238),
("u1D4E5.salt", 0xE239),
("u1D4E6.salt", 0xE23A),
("u1D4E7.salt", 0xE23B),
("u1D4E8.salt", 0xE23C),
("u1D4E9.salt", 0xE23D)
]
}
FONTSPLITTING_REMOVE = None
FONTDATA = {
"FileVersion": "2.3",
"Year": "2013",
"TeX_factor": None, # Leave None for automatic computation
"baselineskip": 1.2,
"lineH": .8,
"lineD": .2,
"hasStyleChar": True
}
RULECHAR = 0x0305
REMAP = {
0x00AF: 0x0304, # MACRON
0x02B9: 0x2032, # prime
0x03D2: 0x03A5, # Upsilon with hook
0x20F0: 0x002A, # (combining star above)
0x25AA: 0x25A0, # blacksquare
0x25B4: 0x25B2, # blacktriangle
0x25B5: 0x25B3, # blacktriangledown
0x25B8: 0x25B6, # blacktriangleright
0x25BE: 0x25BC, # blacktriangledown
0x25BF: 0x25BD, # triangledown
0x25C2: 0x25C0, # blactriangleleft
0x25C3: 0x25C1, # triangleleft
0x2758: 0x2223, # light vertical bar
0x3008: 0x27E8, # langle
0x3009: 0x27E9, # rangle
0xFE37: 0x23DE, 0xFE38: 0x23DF, # over and under braces
}
REMAPACCENT = {
"\u2192": "\u20D7", # vector arrow
"\u2032": "\u0301", # acute accent
"\u007E": "\u0303", # tilde
"\u2035": "\u0300", # grave accent
"\u005E": "\u0302", # hat
"\u0060": "\u0300",
"\u00B4": "\u0301"
}
REMAPACCENTUNDER = {
}
VARIANT = None
VARIANTFONTS = []
TEXCALIGRAPHIC = "offsetA: 0xE20A, noLowerCase: 1"
TEXCALIGRAPHICFONTS = ["VARIANTS"]
TEXOLDSTYLE = "offsetN: 0xE200"
TEXOLDSTYLEFONTS = ["VARIANTS"]
TEXCALIGRAPHICBOLD = "offsetA: 0xE224, noLowerCase: 1"
TEXCALIGRAPHICBOLDFONTS = ["VARIANTS"]
TEXOLDSTYLEBOLD = None
TEXOLDSTYLEBOLDFONTS = []
SANSSERIFGREEK = None
SANSSERIFITALICNUMBER = None
SANSSERIFITALICGREEK = None
SANSSERIFBOLDITALICNUMBER = None
SMALLOPFONTS = None
DELIMITERS = {
0x002D: {"alias": 0x0305, "dir": "H"}, # hyphen-minus
0x002F: {"alias": 0x2044, "dir": "H"}, # slash
0x003D: # equal sign
{
"dir": "H",
"HW": [0x003D],
"stretch": [(0x003D,"rep")]
},
0x005C: # reversed solidus
{
"dir": "V",
"HW": [0x005C]
},
0x002D: {"alias": 0x0305, "dir": "H"}, # minus
0x005E: {"alias": 0x0302, "dir": "H"}, # wide hat
0x005F: {"alias": 0x0332, "dir": "H"}, # low line
0x007E: {"alias": 0x0303, "dir": "H"}, # wide tilde
0x00AF: {"alias": 0x0305, "dir": "H"}, # macron
0x02C6: {"alias": 0x0302, "dir": "H"},
0x02C9: {"alias": 0x0305, "dir": "H"}, # macron
0x02DC: {"alias": 0x0303, "dir": "H"},
0x2015: {"alias": 0x0305, "dir": "H"}, # horizontal line
0x2017: {"alias": 0x0305, "dir": "H"}, # horizontal line
0x203E: {"alias": 0x0305, "dir": "H"}, # overline
0x2195: # updown arrow
{
"dir": "V",
"HW": [0x2195],
"stretch": [("arrowup","top"),("glyph3798","ext"),("arrowdown","bot")]
},
0x21D5: # updown double arrow
{
"dir": "V",
"HW": [0x2195],
"stretch": [("arrowdblup","top"),("glyph3799","ext"),("arrowdbldown","bot")]
},
0x2212: {"alias": 0x0305, "dir": "H"}, # minus
0x2215: {"alias": 0x2044, "dir": "V"}, # division slash
0x2312: {"alias": 0x23DC, "dir": "H"}, # arc
0x2322: {"alias": 0x23DC, "dir": "H"}, # frown
0x2323: {"alias": 0x23DD, "dir": "H"}, # smile
0x2329: {"alias": 0x27E8, "dir": "V"}, # langle
0x232A: {"alias": 0x27E9, "dir": "V"}, # rangle
0x23AA: # \bracevert
{
"dir": "V",
"HW": [0x23AA],
"stretch": [(0x23AA,"ext")]
},
0x23AF: # horizontal line
{
"dir": "H",
"HW": [0x23AF],
"stretch": [(0x23AF,"rep")]
},
0x23B0: {"alias": 0x27C6, "dir": "V"}, # \lmoustache
0x23B1: {"alias": 0x27C5, "dir": "V"}, # \rmoustache
0x23D0: # vertical line extension
{
"dir": "V",
"HW": [0x7C],
"stretch": [(0x7C,"ext")]
},
0x2500: {"alias": 0x0305, "dir": "H"},
0x2758: {"alias": 0x2223, "dir": "V"}, # vertical separator
0x27EE: {"alias": 0x0028, "dir": "V"},
0x27EF: {"alias": 0x0029, "dir": "V"},
0x27F5: {"alias": 0x2190, "dir": "H"}, # long left arrow
0x27F6: {"alias": 0x2192, "dir": "H"}, # long right arrow
0x27F7: {"alias": 0x2194, "dir": "H"}, # long left-right arrow
0x27F8: {"alias": 0x21D0, "dir": "H"}, # long left double arrow
0x27F9: {"alias": 0x21D2, "dir": "H"}, # long right double arrow
0x27FA: {"alias": 0x21D4, "dir": "H"}, # long left-right double arrow
0x27FB: {"alias": 0x21A4, "dir": "H"}, # long left arrow from bar
0x27FC: {"alias": 0x21A6, "dir": "H"}, # long right arrow from bar
0x27FD: {"alias": 0x2906, "dir": "H"}, # long left double arrow from bar
0x27FE: {"alias": 0x2907, "dir": "H"}, # long right double arrow from bar
0x3008: {"alias": 0x27E8, "dir": "V"}, # langle
0x3009: {"alias": 0x27E9, "dir": "V"}, # rangle
0xFE37: {"alias": 0x23DE, "dir": "H"}, # horizontal brace down
0xFE38: {"alias": 0x23DF, "dir": "H"} # horizontal brace up
}
DELIMITERS_EXTRA = [
0x0306,
0x0333,
0x033F,
0x2045,
0x2046,
0x20D0,
0x20D1,
0x20D6,
0x20D7,
0x20E1,
0x20E9,
0x20EE,
0x20EF,
0x21A9,
0x21AA,
0x2210,
0x2211,
0x2229,
0x222B,
0x222C,
0x222D,
0x222E,
0x222F,
0x2230,
0x2231,
0x2232,
0x2233,
0x22C0,
0x22C1,
0x22C2,
0x22C3,
0x23B4,
0x23B5,
0x23DC,
0x23DD,
0x23E0,
0x23E1,
0x27E6,
0x27E7,
0x27EA,
0x27EB,
0x29FC,
0x29FD,
0x2A00,
0x2A01,
0x2A02,
0x2A03,
0x2A04,
0x2A05,
0x2A06,
0x2A07,
0x2A08,
0x2A09,
0x2A0C,
0x2A0D,
0x2A0E,
0x2A0F,
0x2A10,
0x2A11,
0x2A12,
0x2A13,
0x2A14,
0x2A15,
0x2A16,
0x2A17,
0x2A18,
0x2A19,
0x2A1A,
0x2A1B,
0x2A1C
]
| 27.606349 | 84 | 0.554163 | # -*- Mode: Python; tab-width: 2; indent-tabs-mode:nil; -*-
# vim: set ts=2 et sw=2 tw=80:
#
# Copyright (c) 2013 MathJax Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
FONTFAMILY_PREFIX = "Asana MathJax"
FONTNAME_PREFIX = "AsanaMathJax"
MATHFONT = "Asana-Math.otf"
MAINFONTS = None
FONTSPLITTING_EXTRA = {
"Variants": [
("zero.onum", 0xE200), # old style numbers
("one.onum", 0xE201),
("two.onum", 0xE202),
("three.onum", 0xE203),
("four.onum", 0xE204),
("five.onum", 0xE205),
("six.onum", 0xE206),
("seven.onum", 0xE207),
("eight.onum", 0xE208),
("nine.onum", 0xE209),
("u1D49C.salt", 0xE20A), # script salt (used as caligraphic)
("uni212C.salt", 0xE20B),
("u1D49E.salt", 0xE20C),
("u1D49F.salt", 0xE20D),
("uni2130.salt", 0xE20E),
("uni2131.salt", 0xE20F),
("u1D4A2.salt", 0xE210),
("uni210B.salt", 0xE211),
("uni2110.salt", 0xE212),
("u1D4A5.salt", 0xE213),
("u1D4A6.salt", 0xE214),
("uni2112.salt", 0xE215),
("uni2133.salt", 0xE216),
("u1D4A9.salt", 0xE217),
("u1D4AA.salt", 0xE218),
("u1D4AB.salt", 0xE219),
("u1D4AC.salt", 0xE21A),
("uni211B.salt", 0xE21B),
("u1D4AE.salt", 0xE21C),
("u1D4AF.salt", 0xE21D),
("u1D4B0.salt", 0xE21E),
("u1D4B1.salt", 0xE21F),
("u1D4B2.salt", 0xE220),
("u1D4B3.salt", 0xE221),
("u1D4B4.salt", 0xE222),
("u1D4B5.salt", 0xE223),
("u1D4D0.salt", 0xE224), # bold script salt (used as bold caligraphic)
("u1D4D1.salt", 0xE225),
("u1D4D2.salt", 0xE226),
("u1D4D3.salt", 0xE227),
("u1D4D4.salt", 0xE228),
("u1D4D5.salt", 0xE229),
("u1D4D6.salt", 0xE22A),
("u1D4D7.salt", 0xE22B),
("u1D4D8.salt", 0xE22C),
("u1D4D9.salt", 0xE22D),
("u1D4DA.salt", 0xE22E),
("u1D4DB.salt", 0xE22F),
("u1D4DC.salt", 0xE230),
("u1D4DD.salt", 0xE231),
("u1D4DE.salt", 0xE232),
("u1D4DF.salt", 0xE233),
("u1D4E0.salt", 0xE234),
("u1D4E1.salt", 0xE235),
("u1D4E2.salt", 0xE236),
("u1D4E3.salt", 0xE237),
("u1D4E4.salt", 0xE238),
("u1D4E5.salt", 0xE239),
("u1D4E6.salt", 0xE23A),
("u1D4E7.salt", 0xE23B),
("u1D4E8.salt", 0xE23C),
("u1D4E9.salt", 0xE23D)
]
}
FONTSPLITTING_REMOVE = None
FONTDATA = {
"FileVersion": "2.3",
"Year": "2013",
"TeX_factor": None, # Leave None for automatic computation
"baselineskip": 1.2,
"lineH": .8,
"lineD": .2,
"hasStyleChar": True
}
RULECHAR = 0x0305
REMAP = {
0x00AF: 0x0304, # MACRON
0x02B9: 0x2032, # prime
0x03D2: 0x03A5, # Upsilon with hook
0x20F0: 0x002A, # (combining star above)
0x25AA: 0x25A0, # blacksquare
0x25B4: 0x25B2, # blacktriangle
0x25B5: 0x25B3, # blacktriangledown
0x25B8: 0x25B6, # blacktriangleright
0x25BE: 0x25BC, # blacktriangledown
0x25BF: 0x25BD, # triangledown
0x25C2: 0x25C0, # blactriangleleft
0x25C3: 0x25C1, # triangleleft
0x2758: 0x2223, # light vertical bar
0x3008: 0x27E8, # langle
0x3009: 0x27E9, # rangle
0xFE37: 0x23DE, 0xFE38: 0x23DF, # over and under braces
}
REMAPACCENT = {
"\u2192": "\u20D7", # vector arrow
"\u2032": "\u0301", # acute accent
"\u007E": "\u0303", # tilde
"\u2035": "\u0300", # grave accent
"\u005E": "\u0302", # hat
"\u0060": "\u0300",
"\u00B4": "\u0301"
}
REMAPACCENTUNDER = {
}
VARIANT = None
VARIANTFONTS = []
TEXCALIGRAPHIC = "offsetA: 0xE20A, noLowerCase: 1"
TEXCALIGRAPHICFONTS = ["VARIANTS"]
TEXOLDSTYLE = "offsetN: 0xE200"
TEXOLDSTYLEFONTS = ["VARIANTS"]
TEXCALIGRAPHICBOLD = "offsetA: 0xE224, noLowerCase: 1"
TEXCALIGRAPHICBOLDFONTS = ["VARIANTS"]
TEXOLDSTYLEBOLD = None
TEXOLDSTYLEBOLDFONTS = []
SANSSERIFGREEK = None
SANSSERIFITALICNUMBER = None
SANSSERIFITALICGREEK = None
SANSSERIFBOLDITALICNUMBER = None
SMALLOPFONTS = None
DELIMITERS = {
0x002D: {"alias": 0x0305, "dir": "H"}, # hyphen-minus
0x002F: {"alias": 0x2044, "dir": "H"}, # slash
0x003D: # equal sign
{
"dir": "H",
"HW": [0x003D],
"stretch": [(0x003D,"rep")]
},
0x005C: # reversed solidus
{
"dir": "V",
"HW": [0x005C]
},
0x002D: {"alias": 0x0305, "dir": "H"}, # minus
0x005E: {"alias": 0x0302, "dir": "H"}, # wide hat
0x005F: {"alias": 0x0332, "dir": "H"}, # low line
0x007E: {"alias": 0x0303, "dir": "H"}, # wide tilde
0x00AF: {"alias": 0x0305, "dir": "H"}, # macron
0x02C6: {"alias": 0x0302, "dir": "H"},
0x02C9: {"alias": 0x0305, "dir": "H"}, # macron
0x02DC: {"alias": 0x0303, "dir": "H"},
0x2015: {"alias": 0x0305, "dir": "H"}, # horizontal line
0x2017: {"alias": 0x0305, "dir": "H"}, # horizontal line
0x203E: {"alias": 0x0305, "dir": "H"}, # overline
0x2195: # updown arrow
{
"dir": "V",
"HW": [0x2195],
"stretch": [("arrowup","top"),("glyph3798","ext"),("arrowdown","bot")]
},
0x21D5: # updown double arrow
{
"dir": "V",
"HW": [0x2195],
"stretch": [("arrowdblup","top"),("glyph3799","ext"),("arrowdbldown","bot")]
},
0x2212: {"alias": 0x0305, "dir": "H"}, # minus
0x2215: {"alias": 0x2044, "dir": "V"}, # division slash
0x2312: {"alias": 0x23DC, "dir": "H"}, # arc
0x2322: {"alias": 0x23DC, "dir": "H"}, # frown
0x2323: {"alias": 0x23DD, "dir": "H"}, # smile
0x2329: {"alias": 0x27E8, "dir": "V"}, # langle
0x232A: {"alias": 0x27E9, "dir": "V"}, # rangle
0x23AA: # \bracevert
{
"dir": "V",
"HW": [0x23AA],
"stretch": [(0x23AA,"ext")]
},
0x23AF: # horizontal line
{
"dir": "H",
"HW": [0x23AF],
"stretch": [(0x23AF,"rep")]
},
0x23B0: {"alias": 0x27C6, "dir": "V"}, # \lmoustache
0x23B1: {"alias": 0x27C5, "dir": "V"}, # \rmoustache
0x23D0: # vertical line extension
{
"dir": "V",
"HW": [0x7C],
"stretch": [(0x7C,"ext")]
},
0x2500: {"alias": 0x0305, "dir": "H"},
0x2758: {"alias": 0x2223, "dir": "V"}, # vertical separator
0x27EE: {"alias": 0x0028, "dir": "V"},
0x27EF: {"alias": 0x0029, "dir": "V"},
0x27F5: {"alias": 0x2190, "dir": "H"}, # long left arrow
0x27F6: {"alias": 0x2192, "dir": "H"}, # long right arrow
0x27F7: {"alias": 0x2194, "dir": "H"}, # long left-right arrow
0x27F8: {"alias": 0x21D0, "dir": "H"}, # long left double arrow
0x27F9: {"alias": 0x21D2, "dir": "H"}, # long right double arrow
0x27FA: {"alias": 0x21D4, "dir": "H"}, # long left-right double arrow
0x27FB: {"alias": 0x21A4, "dir": "H"}, # long left arrow from bar
0x27FC: {"alias": 0x21A6, "dir": "H"}, # long right arrow from bar
0x27FD: {"alias": 0x2906, "dir": "H"}, # long left double arrow from bar
0x27FE: {"alias": 0x2907, "dir": "H"}, # long right double arrow from bar
0x3008: {"alias": 0x27E8, "dir": "V"}, # langle
0x3009: {"alias": 0x27E9, "dir": "V"}, # rangle
0xFE37: {"alias": 0x23DE, "dir": "H"}, # horizontal brace down
0xFE38: {"alias": 0x23DF, "dir": "H"} # horizontal brace up
}
DELIMITERS_EXTRA = [
0x0306,
0x0333,
0x033F,
0x2045,
0x2046,
0x20D0,
0x20D1,
0x20D6,
0x20D7,
0x20E1,
0x20E9,
0x20EE,
0x20EF,
0x21A9,
0x21AA,
0x2210,
0x2211,
0x2229,
0x222B,
0x222C,
0x222D,
0x222E,
0x222F,
0x2230,
0x2231,
0x2232,
0x2233,
0x22C0,
0x22C1,
0x22C2,
0x22C3,
0x23B4,
0x23B5,
0x23DC,
0x23DD,
0x23E0,
0x23E1,
0x27E6,
0x27E7,
0x27EA,
0x27EB,
0x29FC,
0x29FD,
0x2A00,
0x2A01,
0x2A02,
0x2A03,
0x2A04,
0x2A05,
0x2A06,
0x2A07,
0x2A08,
0x2A09,
0x2A0C,
0x2A0D,
0x2A0E,
0x2A0F,
0x2A10,
0x2A11,
0x2A12,
0x2A13,
0x2A14,
0x2A15,
0x2A16,
0x2A17,
0x2A18,
0x2A19,
0x2A1A,
0x2A1B,
0x2A1C
]
| 0 | 0 | 0 |
490f550c2793789f398c35b62fd7284c7ce09273 | 1,716 | py | Python | dags/pyspark/enem_join_final.py | edgallojr/igti-edc-mod4 | 199b8c1cafa295ae49b1b23b830dfc6d21db7f9c | [
"MIT"
] | null | null | null | dags/pyspark/enem_join_final.py | edgallojr/igti-edc-mod4 | 199b8c1cafa295ae49b1b23b830dfc6d21db7f9c | [
"MIT"
] | null | null | null | dags/pyspark/enem_join_final.py | edgallojr/igti-edc-mod4 | 199b8c1cafa295ae49b1b23b830dfc6d21db7f9c | [
"MIT"
] | null | null | null | from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
# set conf
conf = (
SparkConf()
.set("spark.hadoop.fs.s3a.fast.upload", True)
.set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
.set('spark.hadoop.fs.s3a.aws.credentials.provider', 'com.amazonaws.auth.EnvironmentVariableCredentialsProvider')
.set('spark.jars.packages', 'org.apache.hadoop:hadoop-aws:2.7.3')
)
# apply config
sc = SparkContext(conf=conf).getOrCreate()
if __name__ == "__main__":
# init spark session
spark = SparkSession\
.builder\
.appName("ENEM Job")\
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
uf_idade = (
spark
.read
.format("parquet")
.load("s3a://dl-processing-zone-539445819060/intermediarias/uf_idade")
)
uf_sexo = (
spark
.read
.format("parquet")
.load("s3a://dl-processing-zone-539445819060/intermediarias/uf_sexo")
)
uf_notas = (
spark
.read
.format("parquet")
.load("s3a://dl-processing-zone-539445819060/intermediarias/uf_notas")
)
print("****************")
print("* JOIN FINAL *")
print("****************")
uf_final = (
uf_idade
.join(uf_sexo, on="SG_UF_RESIDENCIA", how="inner")
.join(uf_notas, on="SG_UF_RESIDENCIA", how="inner")
)
(
uf_final
.write
.mode("overwrite")
.format("parquet")
.save("s3a://dl-consumer-zone-539445819060/enem_uf")
)
print("*********************")
print("Escrito com sucesso!")
print("*********************")
spark.stop()
| 24.169014 | 117 | 0.567599 | from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
# set conf
conf = (
SparkConf()
.set("spark.hadoop.fs.s3a.fast.upload", True)
.set("spark.hadoop.fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
.set('spark.hadoop.fs.s3a.aws.credentials.provider', 'com.amazonaws.auth.EnvironmentVariableCredentialsProvider')
.set('spark.jars.packages', 'org.apache.hadoop:hadoop-aws:2.7.3')
)
# apply config
sc = SparkContext(conf=conf).getOrCreate()
if __name__ == "__main__":
# init spark session
spark = SparkSession\
.builder\
.appName("ENEM Job")\
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
uf_idade = (
spark
.read
.format("parquet")
.load("s3a://dl-processing-zone-539445819060/intermediarias/uf_idade")
)
uf_sexo = (
spark
.read
.format("parquet")
.load("s3a://dl-processing-zone-539445819060/intermediarias/uf_sexo")
)
uf_notas = (
spark
.read
.format("parquet")
.load("s3a://dl-processing-zone-539445819060/intermediarias/uf_notas")
)
print("****************")
print("* JOIN FINAL *")
print("****************")
uf_final = (
uf_idade
.join(uf_sexo, on="SG_UF_RESIDENCIA", how="inner")
.join(uf_notas, on="SG_UF_RESIDENCIA", how="inner")
)
(
uf_final
.write
.mode("overwrite")
.format("parquet")
.save("s3a://dl-consumer-zone-539445819060/enem_uf")
)
print("*********************")
print("Escrito com sucesso!")
print("*********************")
spark.stop()
| 0 | 0 | 0 |
2adf70534d48e48c16a4e653ed9e91daa879d5eb | 500 | py | Python | api/migrations/20210706_01_a9a26-fix-flores-description.py | zpapakipos/dynabench-1 | 95884b4e29c57263dc1a85909be979c084d5fac3 | [
"MIT"
] | 15 | 2021-09-24T00:46:04.000Z | 2022-03-16T13:24:56.000Z | api/migrations/20210706_01_a9a26-fix-flores-description.py | zpapakipos/dynabench-1 | 95884b4e29c57263dc1a85909be979c084d5fac3 | [
"MIT"
] | 98 | 2021-09-22T12:33:21.000Z | 2022-03-21T22:23:52.000Z | api/migrations/20210706_01_a9a26-fix-flores-description.py | zpapakipos/dynabench-1 | 95884b4e29c57263dc1a85909be979c084d5fac3 | [
"MIT"
] | 12 | 2021-09-25T05:08:18.000Z | 2022-02-28T21:02:20.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fix the Flores description."""
from yoyo import step
__depends__ = {"20210503_01_xxxx-add-flores-task"}
fix_desc = """Machine Translation Evaluation East Asian languages:
Javanese, Indonesian, Malay, Tagalog, Tamil, English"""
steps = [step(f'UPDATE tasks SET `desc`="{fix_desc}" WHERE task_code="flores_small2"')]
| 29.411765 | 87 | 0.748 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fix the Flores description."""
from yoyo import step
__depends__ = {"20210503_01_xxxx-add-flores-task"}
fix_desc = """Machine Translation Evaluation East Asian languages:
Javanese, Indonesian, Malay, Tagalog, Tamil, English"""
steps = [step(f'UPDATE tasks SET `desc`="{fix_desc}" WHERE task_code="flores_small2"')]
| 0 | 0 | 0 |
013e2e26f5ea37158de12fb17c6167cc62c21d74 | 7,490 | py | Python | dropletbuilder/tests/test_dropletbuilder.py | ftiet/droplet-builder | 5f8a9cd548a58207e45ca77adf92453410eda377 | [
"MIT"
] | 4 | 2019-12-17T13:36:11.000Z | 2020-11-08T23:47:41.000Z | dropletbuilder/tests/test_dropletbuilder.py | rmatsum836/droplet-builder | 5f8a9cd548a58207e45ca77adf92453410eda377 | [
"MIT"
] | 1 | 2019-03-28T02:56:11.000Z | 2019-03-28T02:56:11.000Z | dropletbuilder/tests/test_dropletbuilder.py | rmatsum836/droplet-builder | 5f8a9cd548a58207e45ca77adf92453410eda377 | [
"MIT"
] | 2 | 2019-03-22T20:29:01.000Z | 2020-05-04T16:33:57.000Z | import pytest
import sys
import numpy as np
import mbuild
from dropletbuilder.utils.io_tools import get_fn
"""
Unit Tests for Droplet class.
"""
| 40.928962 | 112 | 0.635381 | import pytest
import sys
import numpy as np
import mbuild
from dropletbuilder.utils.io_tools import get_fn
class BaseTest:
@pytest.fixture(autouse=True)
def initdir(self, tmpdir):
tmpdir.chdir()
@pytest.fixture
def GoldLattice(self):
lattice_compound = mbuild.Compound(name='Au')
lattice_spacing = [0.40788, 0.40788, 0.40788]
lattice_vector = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
gold_locations = [[0., 0., 0.], [.5, .5, 0.], [.5, 0., .5], [0, .5, .5]]
basis = {lattice_compound.name: gold_locations}
gold_lattice = mbuild.Lattice(
lattice_spacing=lattice_spacing,
lattice_vectors=lattice_vector,
lattice_points=basis)
return gold_lattice
@pytest.fixture
def Droplet(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
return Droplet(radius=1, angle=90.0, fluid=water, density=997)
@pytest.fixture
def DropletWithDims(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
return Droplet(radius=1, angle=90.0, fluid=water, density=997, x=4, y=4)
"""
Unit Tests for Droplet class.
"""
class TestDropletBuilder(BaseTest):
def test_dropletbuilder_imported(self):
"""Sample test, will always pass so long as import statement worked"""
assert "dropletbuilder" in sys.modules
def test_init_with_missing_fluid(self):
from dropletbuilder.dropletbuilder import Droplet
with pytest.raises(ValueError, match="Fluid droplet compounds"):
Droplet(radius=1, angle=90.0, density=997)
def test_init_with_missing_density(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
with pytest.raises(ValueError, match="Fluid density"):
Droplet(radius=1, angle=90.0, fluid=water)
def test_init_without_lattice_with_lattice_compound(self):
lattice_compound = mbuild.Compound(name='Au')
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
with pytest.raises(ValueError, match="do not specify lattice_compound"):
Droplet(radius=1, angle=90.0, fluid=water, density=997, x=4, y=4, lattice_compound=lattice_compound)
def test_init_with_lattice_without_lattice_compound(self, GoldLattice):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
with pytest.raises(ValueError, match="Lattice compounds"):
Droplet(radius=1, angle=90.0, fluid=water, density=997, x=4, y=4, lattice=GoldLattice)
def test_init_with_too_small_x(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
with pytest.raises(ValueError, match="x .* at least"):
Droplet(radius=1, angle=90.0, fluid=water, density=997, x=1, y=4)
def test_init_with_too_small_y(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
with pytest.raises(ValueError, match="y .* at least"):
Droplet(radius=1, angle=90.0, fluid=water, density=997, x=4, y=1)
def test_init_with_too_large_x(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
with pytest.raises(ValueError, match="x .* 100"):
Droplet(radius=1, angle=90.0, fluid=water, density=997, x=101, y=4)
def test_init_with_too_large_y(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
with pytest.raises(ValueError, match="y .* 100"):
Droplet(radius=1, angle=90.0, fluid=water, density=997, x=4, y=101)
def test_save(self, Droplet):
Droplet.save('droplet.gro', overwrite=True, combine='all')
def test_save_with_dims(self, DropletWithDims):
DropletWithDims.save('droplet-with-dims.gro', overwrite=True, combine='all')
def test_hierarchy(self, Droplet):
assert len(Droplet.children) == 2
def test_hierarchy_with_dims(self, DropletWithDims):
assert len(DropletWithDims.children) == 2
def test_lateral_dims_near_x_spec(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
x = 4
droplet = Droplet(radius=1, angle=90.0, fluid=water, density=997, x=x)
assert abs(droplet.boundingbox.lengths[0] - x) < 0.5
def test_lateral_dims_near_y_spec(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
y = 4
droplet = Droplet(radius=1, angle=90.0, fluid=water, density=997, y=y)
assert abs(droplet.boundingbox.lengths[1] - y) < 0.5
def test_lateral_dims_near_x_y_spec(self):
from dropletbuilder.dropletbuilder import Droplet
water = mbuild.load(get_fn('tip3p.mol2'))
x = 4
y = 4
droplet = Droplet(radius=1, angle=90.0, fluid=water, density=997, x=x, y=y)
assert (abs(droplet.boundingbox.lengths[0] - x) < 0.5
and abs(droplet.boundingbox.lengths[1] - y) < 0.5)
def test_lateral_dims_in_box(self, Droplet):
for child in Droplet.children:
if (np.min(child.xyz, axis=0)[0] < 0
or np.min(child.xyz, axis=0)[1] < 0):
assert False
if (np.max(child.xyz, axis=0)[0] > Droplet.periodicity[0] or
np.max(child.xyz, axis=0)[1] > Droplet.periodicity[1]):
assert False
assert True
def test_lateral_dims_with_x_y_in_box(self, DropletWithDims):
for child in DropletWithDims.children:
if (np.min(child.xyz, axis=0)[0] < 0
or np.min(child.xyz, axis=0)[1] < 0):
assert False
if ((np.max(child.xyz, axis=0)[0] >
DropletWithDims.periodicity[0])
or (np.max(child.xyz, axis=0)[1] >
DropletWithDims.periodicity[1])):
assert False
assert True
def test_n_fluid_particles(self, Droplet):
n_fluid_particles = 0
for child in Droplet.children:
if child.name != 'LAT':
n_fluid_particles += child.n_particles
assert n_fluid_particles > 20 and n_fluid_particles < 150
def test_n_fluid_particles_with_x_y(self, DropletWithDims):
n_fluid_particles = 0
for child in DropletWithDims.children:
if child.name != 'LAT':
n_fluid_particles += child.n_particles
assert n_fluid_particles > 20 and n_fluid_particles < 150
def test_fluid_particles_in_sheets(self, Droplet):
for child in Droplet.children:
if child.name != 'LAT':
if (np.min(child.xyz, axis=0)[2] <
Droplet.surface_height + 0.001):
assert False
assert True
def test_fluid_particles_in_sheets_with_x_y(self, DropletWithDims):
for child in DropletWithDims.children:
if child.name != 'LAT':
if (np.min(child.xyz, axis=0)[2] <
DropletWithDims.surface_height + 0.001):
assert False
assert True
| 6,333 | 962 | 46 |
faacc746284c9cb9411340754bd24b3ef1455892 | 578 | py | Python | Number_of_Tags.py | blockchainhelppro/dataanalysis-Crypto | 15941b1f64af215c89d87e9b21469bd5b9f1138b | [
"MIT"
] | null | null | null | Number_of_Tags.py | blockchainhelppro/dataanalysis-Crypto | 15941b1f64af215c89d87e9b21469bd5b9f1138b | [
"MIT"
] | null | null | null | Number_of_Tags.py | blockchainhelppro/dataanalysis-Crypto | 15941b1f64af215c89d87e9b21469bd5b9f1138b | [
"MIT"
] | null | null | null | import xml.etree.cElementTree as ET
import pprint
from collections import defaultdict
import re
'''
The code below is to find out how many types of tags are there and the number of each tag.
'''
if __name__ == "__main__":
test() | 23.12 | 91 | 0.626298 | import xml.etree.cElementTree as ET
import pprint
from collections import defaultdict
import re
'''
The code below is to find out how many types of tags are there and the number of each tag.
'''
def count_tags(filename):
tags = {}
for event, element in ET.iterparse(filename):
if element.tag not in tags.keys():
tags[element.tag] = 1
else:
tags[element.tag] += 1
return tags
def test():
tags = count_tags('san-jose_california.osm')
pprint.pprint(tags)
if __name__ == "__main__":
test() | 284 | 0 | 50 |
4dde47225dc6718cce7a86dbac87d5e9e13b9956 | 188 | py | Python | packages/PIPS/validation/Effects/struct_enum01.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 51 | 2015-01-31T01:51:39.000Z | 2022-02-18T02:01:50.000Z | packages/PIPS/validation/Effects/struct_enum01.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 7 | 2017-05-29T09:29:00.000Z | 2019-03-11T16:01:39.000Z | packages/PIPS/validation/Effects/struct_enum01.py | DVSR1966/par4all | 86b33ca9da736e832b568c5637a2381f360f1996 | [
"MIT"
] | 12 | 2015-03-26T08:05:38.000Z | 2022-02-18T02:01:51.000Z | from validation import vworkspace
#import os
with vworkspace() as w:
#os.environ['PROPER_EFFECTS_DEBUG_LEVEL'] = '8'
w.all_functions.display(activate="print_code_proper_effects")
| 26.857143 | 65 | 0.771277 | from validation import vworkspace
#import os
with vworkspace() as w:
#os.environ['PROPER_EFFECTS_DEBUG_LEVEL'] = '8'
w.all_functions.display(activate="print_code_proper_effects")
| 0 | 0 | 0 |
2e85f6158ec674ad4e9ba40cb7bd212b4fe5fe4a | 699 | py | Python | aulas/Aula#9a.py | dani-fn/Projetinhos_Python | 692ff0a7f57d8f8f2e28f7b2c38bb6401e013bdb | [
"MIT"
] | null | null | null | aulas/Aula#9a.py | dani-fn/Projetinhos_Python | 692ff0a7f57d8f8f2e28f7b2c38bb6401e013bdb | [
"MIT"
] | null | null | null | aulas/Aula#9a.py | dani-fn/Projetinhos_Python | 692ff0a7f57d8f8f2e28f7b2c38bb6401e013bdb | [
"MIT"
] | null | null | null | frase = 'Curso em Vídeo Python'
print(frase.upper().count('O')) # Antes não tinha nenhum O maiúsculo, mas com a combinação feita, tem!
print(len(frase))
print(frase[0])
print(frase.replace('Python', 'Daniel'))
print(frase) # Não contou o 'Daniel' da linha anterior!
frase = frase.replace('Python', 'Daniel')
print(frase) # AGORA FOI, pq eu salvei na linha anterior!
print('Curso' in frase)
print(frase.find('Curso'))
print(frase.find('curso'))
print(frase.lower().find('curso'))
print(frase.split())
dividido = frase.split()
print(dividido)
print(dividido[2]) # Mostra a segunda parte do que foi dividido
print(dividido[2][3]) # Mostra o caractere 3 da segunda parte
| 38.833333 | 107 | 0.692418 | frase = 'Curso em Vídeo Python'
print(frase.upper().count('O')) # Antes não tinha nenhum O maiúsculo, mas com a combinação feita, tem!
print(len(frase))
print(frase[0])
print(frase.replace('Python', 'Daniel'))
print(frase) # Não contou o 'Daniel' da linha anterior!
frase = frase.replace('Python', 'Daniel')
print(frase) # AGORA FOI, pq eu salvei na linha anterior!
print('Curso' in frase)
print(frase.find('Curso'))
print(frase.find('curso'))
print(frase.lower().find('curso'))
print(frase.split())
dividido = frase.split()
print(dividido)
print(dividido[2]) # Mostra a segunda parte do que foi dividido
print(dividido[2][3]) # Mostra o caractere 3 da segunda parte
| 0 | 0 | 0 |
df66dd8a2155a45d1ccc9f19b29ed5e0169da35e | 558 | py | Python | many_users.py | csy1993/PythonPractice | 67efe09f6c01a90d4d39ccce45b00fb5980535d7 | [
"Apache-2.0"
] | null | null | null | many_users.py | csy1993/PythonPractice | 67efe09f6c01a90d4d39ccce45b00fb5980535d7 | [
"Apache-2.0"
] | null | null | null | many_users.py | csy1993/PythonPractice | 67efe09f6c01a90d4d39ccce45b00fb5980535d7 | [
"Apache-2.0"
] | null | null | null | '''
* @Author: csy
* @Date: 2019-04-28 13:55:42
* @Last Modified by: csy
* @Last Modified time: 2019-04-28 13:55:42
'''
users = {
'aeinstein': {
'first': 'albert',
'last': 'einstein',
'location': 'princeton',
},
'mcurie': {
'first': 'marie',
'last': 'curie',
'location': 'paris',
}
}
for username, userinfo in users.items():
print('Username:'+username.title())
print('\tFullname:'+userinfo['first']+'\t'+userinfo['last'])
print('\tLocation:'+userinfo['location'])
print()
| 23.25 | 64 | 0.53405 | '''
* @Author: csy
* @Date: 2019-04-28 13:55:42
* @Last Modified by: csy
* @Last Modified time: 2019-04-28 13:55:42
'''
users = {
'aeinstein': {
'first': 'albert',
'last': 'einstein',
'location': 'princeton',
},
'mcurie': {
'first': 'marie',
'last': 'curie',
'location': 'paris',
}
}
for username, userinfo in users.items():
print('Username:'+username.title())
print('\tFullname:'+userinfo['first']+'\t'+userinfo['last'])
print('\tLocation:'+userinfo['location'])
print()
| 0 | 0 | 0 |
f279d8497722db3069ab278db686c4ef9d42d7a2 | 378 | py | Python | blocks/__init__.py | dennisme/AESBlockCiphers | 2179a380f6ad3a1976ff8fea4f4b17b1cc3affd1 | [
"MIT"
] | null | null | null | blocks/__init__.py | dennisme/AESBlockCiphers | 2179a380f6ad3a1976ff8fea4f4b17b1cc3affd1 | [
"MIT"
] | null | null | null | blocks/__init__.py | dennisme/AESBlockCiphers | 2179a380f6ad3a1976ff8fea4f4b17b1cc3affd1 | [
"MIT"
] | null | null | null | __all__ = ['aesECB',
'aesCBC',
'aesCFB',
'aesOFB',
'aesCTR',
'padding',
'xor',
'chunk']
from blocks import aesECB
from blocks import aesCBC
from blocks import aesCFB
from blocks import aesOFB
from blocks import aesCTR
from blocks import padding
from blocks import xor
from blocks import chunk
| 19.894737 | 26 | 0.595238 | __all__ = ['aesECB',
'aesCBC',
'aesCFB',
'aesOFB',
'aesCTR',
'padding',
'xor',
'chunk']
from blocks import aesECB
from blocks import aesCBC
from blocks import aesCFB
from blocks import aesOFB
from blocks import aesCTR
from blocks import padding
from blocks import xor
from blocks import chunk
| 0 | 0 | 0 |
fa074e5e9056fb559e9b8abc0881ad6ae56b47c9 | 4,532 | py | Python | test/azure/Expected/AcceptanceTests/AzureParameterGrouping/azureparametergrouping/models/_models_py3.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureParameterGrouping/azureparametergrouping/models/_models_py3.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/AzureParameterGrouping/azureparametergrouping/models/_models_py3.py | fearthecowboy/autorest.python | a251e361218598b55b0621db2275aafcb7158a5c | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CloudError(Model):
"""CloudError.
"""
_attribute_map = {
}
class Error(Model):
"""Error.
:param status:
:type status: int
:param message:
:type message: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
}
class ErrorException(HttpOperationError):
"""Server responsed with exception of type: 'Error'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
class FirstParameterGroup(Model):
"""Additional parameters for a set of operations, such as:
ParameterGrouping_post_multi_param_groups,
ParameterGrouping_post_shared_parameter_group_object.
:param header_one:
:type header_one: str
:param query_one: Query parameter with default. Default value: 30 .
:type query_one: int
"""
_attribute_map = {
'header_one': {'key': '', 'type': 'str'},
'query_one': {'key': '', 'type': 'int'},
}
class ParameterGroupingPostMultiParamGroupsSecondParamGroup(Model):
"""Additional parameters for post_multi_param_groups operation.
:param header_two:
:type header_two: str
:param query_two: Query parameter with default. Default value: 30 .
:type query_two: int
"""
_attribute_map = {
'header_two': {'key': '', 'type': 'str'},
'query_two': {'key': '', 'type': 'int'},
}
class ParameterGroupingPostOptionalParameters(Model):
"""Additional parameters for post_optional operation.
:param custom_header:
:type custom_header: str
:param query: Query parameter with default. Default value: 30 .
:type query: int
"""
_attribute_map = {
'custom_header': {'key': '', 'type': 'str'},
'query': {'key': '', 'type': 'int'},
}
class ParameterGroupingPostRequiredParameters(Model):
"""Additional parameters for post_required operation.
All required parameters must be populated in order to send to Azure.
:param body: Required.
:type body: int
:param custom_header:
:type custom_header: str
:param query: Query parameter with default. Default value: 30 .
:type query: int
:param path: Required. Path parameter
:type path: str
"""
_validation = {
'body': {'required': True},
'path': {'required': True},
}
_attribute_map = {
'body': {'key': '', 'type': 'int'},
'custom_header': {'key': '', 'type': 'str'},
'query': {'key': '', 'type': 'int'},
'path': {'key': '', 'type': 'str'},
}
| 30.013245 | 106 | 0.621801 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CloudError(Model):
"""CloudError.
"""
_attribute_map = {
}
class Error(Model):
"""Error.
:param status:
:type status: int
:param message:
:type message: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, *, status: int=None, message: str=None, **kwargs) -> None:
super(Error, self).__init__(**kwargs)
self.status = status
self.message = message
class ErrorException(HttpOperationError):
"""Server responsed with exception of type: 'Error'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
class FirstParameterGroup(Model):
"""Additional parameters for a set of operations, such as:
ParameterGrouping_post_multi_param_groups,
ParameterGrouping_post_shared_parameter_group_object.
:param header_one:
:type header_one: str
:param query_one: Query parameter with default. Default value: 30 .
:type query_one: int
"""
_attribute_map = {
'header_one': {'key': '', 'type': 'str'},
'query_one': {'key': '', 'type': 'int'},
}
def __init__(self, *, header_one: str=None, query_one: int=30, **kwargs) -> None:
super(FirstParameterGroup, self).__init__(**kwargs)
self.header_one = header_one
self.query_one = query_one
class ParameterGroupingPostMultiParamGroupsSecondParamGroup(Model):
"""Additional parameters for post_multi_param_groups operation.
:param header_two:
:type header_two: str
:param query_two: Query parameter with default. Default value: 30 .
:type query_two: int
"""
_attribute_map = {
'header_two': {'key': '', 'type': 'str'},
'query_two': {'key': '', 'type': 'int'},
}
def __init__(self, *, header_two: str=None, query_two: int=30, **kwargs) -> None:
super(ParameterGroupingPostMultiParamGroupsSecondParamGroup, self).__init__(**kwargs)
self.header_two = header_two
self.query_two = query_two
class ParameterGroupingPostOptionalParameters(Model):
"""Additional parameters for post_optional operation.
:param custom_header:
:type custom_header: str
:param query: Query parameter with default. Default value: 30 .
:type query: int
"""
_attribute_map = {
'custom_header': {'key': '', 'type': 'str'},
'query': {'key': '', 'type': 'int'},
}
def __init__(self, *, custom_header: str=None, query: int=30, **kwargs) -> None:
super(ParameterGroupingPostOptionalParameters, self).__init__(**kwargs)
self.custom_header = custom_header
self.query = query
class ParameterGroupingPostRequiredParameters(Model):
"""Additional parameters for post_required operation.
All required parameters must be populated in order to send to Azure.
:param body: Required.
:type body: int
:param custom_header:
:type custom_header: str
:param query: Query parameter with default. Default value: 30 .
:type query: int
:param path: Required. Path parameter
:type path: str
"""
_validation = {
'body': {'required': True},
'path': {'required': True},
}
_attribute_map = {
'body': {'key': '', 'type': 'int'},
'custom_header': {'key': '', 'type': 'str'},
'query': {'key': '', 'type': 'int'},
'path': {'key': '', 'type': 'str'},
}
def __init__(self, *, body: int, path: str, custom_header: str=None, query: int=30, **kwargs) -> None:
super(ParameterGroupingPostRequiredParameters, self).__init__(**kwargs)
self.body = body
self.custom_header = custom_header
self.query = query
self.path = path
| 1,183 | 0 | 162 |
cc07f893fa695d5935ee5d0a272a35b1c4ecd157 | 5,348 | py | Python | src/oci/data_safe/models/random_digits_format_entry.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_safe/models/random_digits_format_entry.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_safe/models/random_digits_format_entry.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .format_entry import FormatEntry
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class RandomDigitsFormatEntry(FormatEntry):
"""
The Random Digits masking format generates random digits of length within a range.
The length range is defined by the startLength and endLength attributes. The start
length must be less than or equal to the end length. When masking columns with
uniqueness constraint, ensure that the length range is sufficient enough to generate
unique values. This masking format pads to the appropriate length in a string, but
does not pad when used for a number column. It's a complementary type of Random Number,
which is not padded.
"""
def __init__(self, **kwargs):
"""
Initializes a new RandomDigitsFormatEntry object with values from keyword arguments. The default value of the :py:attr:`~oci.data_safe.models.RandomDigitsFormatEntry.type` attribute
of this class is ``RANDOM_DIGITS`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param type:
The value to assign to the type property of this RandomDigitsFormatEntry.
Allowed values for this property are: "DELETE_ROWS", "DETERMINISTIC_SUBSTITUTION", "DETERMINISTIC_ENCRYPTION", "DETERMINISTIC_ENCRYPTION_DATE", "FIXED_NUMBER", "FIXED_STRING", "LIBRARY_MASKING_FORMAT", "NULL_VALUE", "POST_PROCESSING_FUNCTION", "PRESERVE_ORIGINAL_DATA", "RANDOM_DATE", "RANDOM_DECIMAL_NUMBER", "RANDOM_DIGITS", "RANDOM_LIST", "RANDOM_NUMBER", "RANDOM_STRING", "RANDOM_SUBSTITUTION", "REGULAR_EXPRESSION", "SHUFFLE", "SQL_EXPRESSION", "SUBSTRING", "TRUNCATE_TABLE", "USER_DEFINED_FUNCTION"
:type type: str
:param description:
The value to assign to the description property of this RandomDigitsFormatEntry.
:type description: str
:param start_length:
The value to assign to the start_length property of this RandomDigitsFormatEntry.
:type start_length: int
:param end_length:
The value to assign to the end_length property of this RandomDigitsFormatEntry.
:type end_length: int
"""
self.swagger_types = {
'type': 'str',
'description': 'str',
'start_length': 'int',
'end_length': 'int'
}
self.attribute_map = {
'type': 'type',
'description': 'description',
'start_length': 'startLength',
'end_length': 'endLength'
}
self._type = None
self._description = None
self._start_length = None
self._end_length = None
self._type = 'RANDOM_DIGITS'
@property
def start_length(self):
"""
**[Required]** Gets the start_length of this RandomDigitsFormatEntry.
The minimum number of digits the generated values should have. It can be
any integer greater than zero, but it must be less than or equal to the
end length.
:return: The start_length of this RandomDigitsFormatEntry.
:rtype: int
"""
return self._start_length
@start_length.setter
def start_length(self, start_length):
"""
Sets the start_length of this RandomDigitsFormatEntry.
The minimum number of digits the generated values should have. It can be
any integer greater than zero, but it must be less than or equal to the
end length.
:param start_length: The start_length of this RandomDigitsFormatEntry.
:type: int
"""
self._start_length = start_length
@property
def end_length(self):
"""
**[Required]** Gets the end_length of this RandomDigitsFormatEntry.
The maximum number of digits the generated values should have. It can
be any integer greater than zero, but it must be greater than or equal
to the start length.
:return: The end_length of this RandomDigitsFormatEntry.
:rtype: int
"""
return self._end_length
@end_length.setter
def end_length(self, end_length):
"""
Sets the end_length of this RandomDigitsFormatEntry.
The maximum number of digits the generated values should have. It can
be any integer greater than zero, but it must be greater than or equal
to the start length.
:param end_length: The end_length of this RandomDigitsFormatEntry.
:type: int
"""
self._end_length = end_length
| 40.210526 | 516 | 0.679506 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .format_entry import FormatEntry
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class RandomDigitsFormatEntry(FormatEntry):
"""
The Random Digits masking format generates random digits of length within a range.
The length range is defined by the startLength and endLength attributes. The start
length must be less than or equal to the end length. When masking columns with
uniqueness constraint, ensure that the length range is sufficient enough to generate
unique values. This masking format pads to the appropriate length in a string, but
does not pad when used for a number column. It's a complementary type of Random Number,
which is not padded.
"""
def __init__(self, **kwargs):
"""
Initializes a new RandomDigitsFormatEntry object with values from keyword arguments. The default value of the :py:attr:`~oci.data_safe.models.RandomDigitsFormatEntry.type` attribute
of this class is ``RANDOM_DIGITS`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param type:
The value to assign to the type property of this RandomDigitsFormatEntry.
Allowed values for this property are: "DELETE_ROWS", "DETERMINISTIC_SUBSTITUTION", "DETERMINISTIC_ENCRYPTION", "DETERMINISTIC_ENCRYPTION_DATE", "FIXED_NUMBER", "FIXED_STRING", "LIBRARY_MASKING_FORMAT", "NULL_VALUE", "POST_PROCESSING_FUNCTION", "PRESERVE_ORIGINAL_DATA", "RANDOM_DATE", "RANDOM_DECIMAL_NUMBER", "RANDOM_DIGITS", "RANDOM_LIST", "RANDOM_NUMBER", "RANDOM_STRING", "RANDOM_SUBSTITUTION", "REGULAR_EXPRESSION", "SHUFFLE", "SQL_EXPRESSION", "SUBSTRING", "TRUNCATE_TABLE", "USER_DEFINED_FUNCTION"
:type type: str
:param description:
The value to assign to the description property of this RandomDigitsFormatEntry.
:type description: str
:param start_length:
The value to assign to the start_length property of this RandomDigitsFormatEntry.
:type start_length: int
:param end_length:
The value to assign to the end_length property of this RandomDigitsFormatEntry.
:type end_length: int
"""
self.swagger_types = {
'type': 'str',
'description': 'str',
'start_length': 'int',
'end_length': 'int'
}
self.attribute_map = {
'type': 'type',
'description': 'description',
'start_length': 'startLength',
'end_length': 'endLength'
}
self._type = None
self._description = None
self._start_length = None
self._end_length = None
self._type = 'RANDOM_DIGITS'
@property
def start_length(self):
"""
**[Required]** Gets the start_length of this RandomDigitsFormatEntry.
The minimum number of digits the generated values should have. It can be
any integer greater than zero, but it must be less than or equal to the
end length.
:return: The start_length of this RandomDigitsFormatEntry.
:rtype: int
"""
return self._start_length
@start_length.setter
def start_length(self, start_length):
"""
Sets the start_length of this RandomDigitsFormatEntry.
The minimum number of digits the generated values should have. It can be
any integer greater than zero, but it must be less than or equal to the
end length.
:param start_length: The start_length of this RandomDigitsFormatEntry.
:type: int
"""
self._start_length = start_length
@property
def end_length(self):
"""
**[Required]** Gets the end_length of this RandomDigitsFormatEntry.
The maximum number of digits the generated values should have. It can
be any integer greater than zero, but it must be greater than or equal
to the start length.
:return: The end_length of this RandomDigitsFormatEntry.
:rtype: int
"""
return self._end_length
@end_length.setter
def end_length(self, end_length):
"""
Sets the end_length of this RandomDigitsFormatEntry.
The maximum number of digits the generated values should have. It can
be any integer greater than zero, but it must be greater than or equal
to the start length.
:param end_length: The end_length of this RandomDigitsFormatEntry.
:type: int
"""
self._end_length = end_length
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 177 | 0 | 81 |
29fa035934fe9456c08fa3d09c2af1de16124dc1 | 311 | py | Python | problem_3.py | bazitur/my-euler-solutions | eb70c9866989dee16677819901b1a2e3206b2725 | [
"MIT"
] | null | null | null | problem_3.py | bazitur/my-euler-solutions | eb70c9866989dee16677819901b1a2e3206b2725 | [
"MIT"
] | null | null | null | problem_3.py | bazitur/my-euler-solutions | eb70c9866989dee16677819901b1a2e3206b2725 | [
"MIT"
] | null | null | null | #/usr/bin/env python3
if __name__ == "__main__":
print(max(factorize(int(input("Enter a number: ")))))
| 20.733333 | 57 | 0.466238 | #/usr/bin/env python3
def factorize(n):
for i in range(2, int(n ** 0.5+1)):
if n == 1:
break
while n % i == 0:
yield i
n /= i
else:
if n != 1: yield int(n)
if __name__ == "__main__":
print(max(factorize(int(input("Enter a number: ")))))
| 180 | 0 | 23 |
d9ea27ea9c60e60e09f05b7a12fa1a6dfefbd94d | 649 | py | Python | tests/test_basic.py | aleprada/PyMISP_CISA_alerts | 4c5f53970e0e43491c4fc071a5bf647b8f6b29b7 | [
"MIT"
] | 2 | 2021-05-06T09:09:30.000Z | 2021-05-14T12:48:42.000Z | tests/test_basic.py | aleprada/PyMISP_CISA_alerts | 4c5f53970e0e43491c4fc071a5bf647b8f6b29b7 | [
"MIT"
] | null | null | null | tests/test_basic.py | aleprada/PyMISP_CISA_alerts | 4c5f53970e0e43491c4fc071a5bf647b8f6b29b7 | [
"MIT"
] | 1 | 2021-05-06T09:09:31.000Z | 2021-05-06T09:09:31.000Z | import pytest
from cisa.cisa import get_ics_threats, get_vulnerability_reports
from config.config import get_software_list, config_parser, config_parser_section
| 22.37931 | 81 | 0.742681 | import pytest
from cisa.cisa import get_ics_threats, get_vulnerability_reports
from config.config import get_software_list, config_parser, config_parser_section
def test_config_parser():
result = config_parser("misp","url")
assert len(result) > 0
def test_config_parser_section():
result = config_parser_section("misp")
assert len(result) > 0
def test_software_list():
result = get_software_list()
assert len(result) > 0
def test_get_ics_threats():
threats = get_ics_threats()
assert len(threats) >= 0
def test_get_vulnerability_reports():
vulns = get_vulnerability_reports()
assert len(vulns) >= 0
| 368 | 0 | 115 |
839bb6f494bb25acb34abb8eb5f79588bb7a6a50 | 241 | py | Python | students/K33402/Michshenko_Violetta/Lr2/django_project_Michshenko/homeworks/admin.py | mynamesvioletta/ITMO_ICT_WebDevelopment_2021-2022 | 0314e5ec29718af5570662486621983cd6c1332a | [
"MIT"
] | null | null | null | students/K33402/Michshenko_Violetta/Lr2/django_project_Michshenko/homeworks/admin.py | mynamesvioletta/ITMO_ICT_WebDevelopment_2021-2022 | 0314e5ec29718af5570662486621983cd6c1332a | [
"MIT"
] | null | null | null | students/K33402/Michshenko_Violetta/Lr2/django_project_Michshenko/homeworks/admin.py | mynamesvioletta/ITMO_ICT_WebDevelopment_2021-2022 | 0314e5ec29718af5570662486621983cd6c1332a | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Homework, Done
@admin.register(Homework)
@admin.register(Done)
# Register your models here.
| 17.214286 | 38 | 0.771784 | from django.contrib import admin
from .models import Homework, Done
@admin.register(Homework)
class Homeworkadmin(admin.ModelAdmin):
pass
@admin.register(Done)
class Doneadmin(admin.ModelAdmin):
pass
# Register your models here.
| 0 | 48 | 44 |
d935367ad31f6f824a4fef8bb25981dd415424f2 | 1,407 | py | Python | mine/renderer/train_renderer.py | jefequien/ICCV2019-LearningToPaint | 8d638a9a6782e39386b2083ea0eb85fed8c0a7ab | [
"MIT"
] | null | null | null | mine/renderer/train_renderer.py | jefequien/ICCV2019-LearningToPaint | 8d638a9a6782e39386b2083ea0eb85fed8c0a7ab | [
"MIT"
] | null | null | null | mine/renderer/train_renderer.py | jefequien/ICCV2019-LearningToPaint | 8d638a9a6782e39386b2083ea0eb85fed8c0a7ab | [
"MIT"
] | null | null | null | import argparse
import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from gym_dataset import GymDataset
from gym_canvas import CanvasEnv
from model import FCN
if __name__ == "__main__":
main()
| 21.984375 | 72 | 0.621891 | import argparse
import os
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from gym_dataset import GymDataset
from gym_canvas import CanvasEnv
from model import FCN
class RendererTrainer:
def __init__(self, env, model):
self.device = torch.device("cuda")
self.lr = 3e-6
self.env = env
self.dataset = GymDataset(env)
self.model = model.to(self.device)
self.criterion = nn.MSELoss()
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
def train(self):
train_loader = DataLoader(self.dataset)
self.model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(self.device), target.to(self.device)
print(data.shape, target.shape)
continue
output = self.model(data)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def valid(self):
pass
def visualize(self):
pass
def main():
parser = argparse.ArgumentParser(description='Renderer Training')
args = parser.parse_args()
model = FCN()
env = CanvasEnv()
trainer = RendererTrainer(env, model)
trainer.train()
if __name__ == "__main__":
main()
| 977 | 1 | 154 |
896f54576f48c217af8bc801ac04da0c61b592f3 | 398 | py | Python | resources/models/incidentModel.py | Jonasdart/SESP-API | 7a86caac09327917b7ee93df14b66a449c988bfa | [
"MIT"
] | null | null | null | resources/models/incidentModel.py | Jonasdart/SESP-API | 7a86caac09327917b7ee93df14b66a449c988bfa | [
"MIT"
] | 1 | 2020-05-21T18:25:07.000Z | 2020-05-21T18:25:07.000Z | resources/models/incidentModel.py | duzzsys/SESP-API | 7a86caac09327917b7ee93df14b66a449c988bfa | [
"MIT"
] | 1 | 2020-11-22T19:19:45.000Z | 2020-11-22T19:19:45.000Z | #encoding utf-8
#__author__ = Jonas Duarte, duarte.jsystem@gmail.com
#Python3
__author__ = 'Jonas Duarte'
| 19.9 | 60 | 0.748744 | #encoding utf-8
#__author__ = Jonas Duarte, duarte.jsystem@gmail.com
#Python3
__author__ = 'Jonas Duarte'
class IncidentModel():
def _search_incidents_by_name(self, name):
raise NotImplementedError
def _search_incidents_by_description(self, description):
raise NotImplementedError
def _search_incident_by_id(self, incident_id):
raise NotImplementedError | 183 | 1 | 104 |
6fb5fa8c1118a7d60cd0ea69433cc2b5d47fc852 | 4,116 | py | Python | setup.py | Dalbasar/background-zmq-ipython | 6b9b523b01a3424999404a2af6037ff4945d13e0 | [
"BSD-2-Clause"
] | 12 | 2019-05-30T09:34:21.000Z | 2022-03-14T04:43:54.000Z | setup.py | Dalbasar/background-zmq-ipython | 6b9b523b01a3424999404a2af6037ff4945d13e0 | [
"BSD-2-Clause"
] | 11 | 2019-09-28T10:52:16.000Z | 2022-03-14T05:06:54.000Z | setup.py | Dalbasar/background-zmq-ipython | 6b9b523b01a3424999404a2af6037ff4945d13e0 | [
"BSD-2-Clause"
] | 3 | 2020-02-01T01:26:26.000Z | 2021-12-14T19:29:56.000Z |
"""
Usage:
Create ~/.pypirc with info:
[distutils]
index-servers =
pypi
[pypi]
repository: https://upload.pypi.org/legacy/
username: ...
password: ...
(Not needed anymore) Registering the project: python3 setup.py register
New release: python3 setup.py sdist upload
I had some trouble at some point, and this helped:
pip3 install --user twine
python3 setup.py sdist
twine upload dist/background_zmq_ipython-*
See also MANIFEST.in for included files.
For debugging this script:
python3 setup.py sdist
pip3 install --user dist/...*.tar.gz -v
(Without -v, all stdout/stderr from here will not be shown.)
"""
from distutils.core import setup
import time
from pprint import pprint
import os
import sys
from subprocess import Popen, check_output, PIPE
def parse_pkg_info(fn):
"""
:param str fn:
:rtype: dict[str,str]
"""
res = {}
for ln in open(fn).read().splitlines():
if not ln or not ln[:1].strip():
continue
key, value = ln.split(": ", 1)
res[key] = value
return res
if os.path.exists("PKG-INFO"):
print("Found existing PKG-INFO.")
info = parse_pkg_info("PKG-INFO")
version = info["Version"]
print("Version via PKG-INFO:", version)
else:
try:
version = git_head_version()
print("Version via Git:", version)
except Exception as exc:
print("Exception while getting Git version:", exc)
sys.excepthook(*sys.exc_info())
version = time.strftime("1.%Y%m%d.%H%M%S", time.gmtime())
print("Version via current time:", version)
if os.environ.get("DEBUG", "") == "1":
debug_print_file(".")
debug_print_file("PKG-INFO")
setup(
name='background_zmq_ipython',
version=version,
packages=['background_zmq_ipython'],
package_dir={'background_zmq_ipython': ''},
description='Background ZMQ IPython/Jupyter kernel',
author='Albert Zeyer',
author_email='albzey@gmail.com',
url='https://github.com/albertz/background-zmq-ipython',
license='2-clause BSD license',
long_description=open('README.rst').read(),
install_requires=open('requirements.txt').read().splitlines(),
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| 28 | 102 | 0.63484 |
"""
Usage:
Create ~/.pypirc with info:
[distutils]
index-servers =
pypi
[pypi]
repository: https://upload.pypi.org/legacy/
username: ...
password: ...
(Not needed anymore) Registering the project: python3 setup.py register
New release: python3 setup.py sdist upload
I had some trouble at some point, and this helped:
pip3 install --user twine
python3 setup.py sdist
twine upload dist/background_zmq_ipython-*
See also MANIFEST.in for included files.
For debugging this script:
python3 setup.py sdist
pip3 install --user dist/...*.tar.gz -v
(Without -v, all stdout/stderr from here will not be shown.)
"""
from distutils.core import setup
import time
from pprint import pprint
import os
import sys
from subprocess import Popen, check_output, PIPE
def debug_print_file(fn):
print("%s:" % fn)
if not os.path.exists(fn):
print("<does not exist>")
return
if os.path.isdir(fn):
print("<dir:>")
pprint(os.listdir(fn))
return
print(open(fn).read())
def parse_pkg_info(fn):
"""
:param str fn:
:rtype: dict[str,str]
"""
res = {}
for ln in open(fn).read().splitlines():
if not ln or not ln[:1].strip():
continue
key, value = ln.split(": ", 1)
res[key] = value
return res
def git_commit_rev(commit="HEAD", git_dir="."):
    """Return the abbreviated git hash of *commit* in *git_dir* (None means HEAD)."""
    target = "HEAD" if commit is None else commit
    raw = check_output(["git", "rev-parse", "--short", target], cwd=git_dir)
    return raw.decode("utf8").strip()
def git_is_dirty(git_dir="."):
    """Return True if the working tree in *git_dir* has uncommitted changes."""
    proc = Popen(
        ["git", "diff", "--no-ext-diff", "--quiet", "--exit-code"],
        cwd=git_dir, stdout=PIPE)
    proc.communicate()
    code = proc.returncode
    # git diff --quiet exits 0 when clean and 1 when dirty.
    if code in (0, 1):
        return code == 1
    raise Exception("unexpected return code %i" % code)
def git_commit_date(commit="HEAD", git_dir="."):
    """Return the commit date of *commit* formatted as "YYYYMMDD.HHMMSS"."""
    raw = check_output(["git", "show", "-s", "--format=%ci", commit], cwd=git_dir)
    # Drop the trailing " +ZZZZ" timezone, then strip punctuation:
    # "2019-02-02 15:45:27" -> "20190202.154527"
    stamp = raw.decode("utf8").strip()[:-6]
    stamp = stamp.replace(":", "").replace("-", "")
    return stamp.replace(" ", ".")
def git_head_version(git_dir="."):
    """Derive a version string from the HEAD commit date of *git_dir*.

    Only the commit date is used; the revision hash and dirty flag are
    deliberately left out so the result stays
    distutils.version.StrictVersion compatible.
    """
    commit_date = git_commit_date(git_dir=git_dir)  # like "20190202.154527"
    # rev = git_commit_rev(git_dir=git_dir)
    # is_dirty = git_is_dirty(git_dir=git_dir)
    # Make this distutils.version.StrictVersion compatible.
    return "1.%s" % commit_date
# Resolve the package version, in order of preference:
#   1. PKG-INFO (present inside an sdist),
#   2. the git HEAD commit date,
#   3. the current UTC time as a last resort.
if os.path.exists("PKG-INFO"):
    print("Found existing PKG-INFO.")
    info = parse_pkg_info("PKG-INFO")
    version = info["Version"]
    print("Version via PKG-INFO:", version)
else:
    try:
        version = git_head_version()
        print("Version via Git:", version)
    except Exception as exc:
        # e.g. not a git checkout, or git not installed -- fall back to time.
        print("Exception while getting Git version:", exc)
        sys.excepthook(*sys.exc_info())
        version = time.strftime("1.%Y%m%d.%H%M%S", time.gmtime())
        print("Version via current time:", version)
# Set DEBUG=1 in the environment to inspect what pip sees during install.
if os.environ.get("DEBUG", "") == "1":
    debug_print_file(".")
    debug_print_file("PKG-INFO")
setup(
    name='background_zmq_ipython',
    version=version,
    packages=['background_zmq_ipython'],
    package_dir={'background_zmq_ipython': ''},
    description='Background ZMQ IPython/Jupyter kernel',
    author='Albert Zeyer',
    author_email='albzey@gmail.com',
    url='https://github.com/albertz/background-zmq-ipython',
    license='2-clause BSD license',
    long_description=open('README.rst').read(),
    install_requires=open('requirements.txt').read().splitlines(),
    # https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| 1,196 | 0 | 115 |
9b8bb54dc558a7e41047440e0f04aef4061a7ef6 | 477 | py | Python | instance/config.py | paulzay/Sendit2 | e1da79c4c49547ac63834338583bfb39f9e2b25c | [
"MIT"
] | 3 | 2019-01-23T11:15:24.000Z | 2020-08-06T11:56:39.000Z | instance/config.py | paulzay/Sendit2 | e1da79c4c49547ac63834338583bfb39f9e2b25c | [
"MIT"
] | null | null | null | instance/config.py | paulzay/Sendit2 | e1da79c4c49547ac63834338583bfb39f9e2b25c | [
"MIT"
] | 1 | 2019-01-24T05:38:07.000Z | 2019-01-24T05:38:07.000Z | import os
app_config = dict(
development = DevelopmentConfig,
testing = TestingConfig,
production = ProductionConfig
) | 17.035714 | 36 | 0.679245 | import os
class BaseConfig:
    """Shared Flask configuration defaults; environment configs override these."""
    DEBUG = False    # Flask debugger off by default
    TESTING = False  # Flask testing mode off by default
class DevelopmentConfig(BaseConfig):
    """Development environment: debugger enabled, testing mode off."""
    ENV = 'development'
    DEBUG = True
    TESTING = False
class TestingConfig(BaseConfig):
    """Test environment: both the debugger and Flask testing mode enabled."""
    ENV = 'testing'
    DEBUG = True
    TESTING = True
class ProductionConfig(BaseConfig):
    """Production environment: debugger and testing mode explicitly disabled."""
    ENV = 'production'
    DEBUG = False
    TESTING = False
app_config = dict(
development = DevelopmentConfig,
testing = TestingConfig,
production = ProductionConfig
) | 0 | 252 | 92 |
2b9ca9f6cb11eea479bcbdb32e35d05807baf760 | 16,543 | py | Python | wautils.py | sudobob/WildApricotUtils | 8b633fde0a845f7fe0da84a1dc08b926dff07ef1 | [
"MIT"
] | null | null | null | wautils.py | sudobob/WildApricotUtils | 8b633fde0a845f7fe0da84a1dc08b926dff07ef1 | [
"MIT"
] | 5 | 2021-03-25T14:42:23.000Z | 2021-06-02T00:38:58.000Z | wautils.py | sudobob/WildApricotUtils | 8b633fde0a845f7fe0da84a1dc08b926dff07ef1 | [
"MIT"
] | 3 | 2020-03-08T19:56:27.000Z | 2021-03-25T19:56:24.000Z | #!/usr/bin/env python3
"""
wautils
A set of web-based tools for Wild Apricot Integration
o Accepts your Wild Apricot Credentials via Wild Apricot OAuth
o Determines if you are have Wild Apricot admin credentials
o Give you further access only if you have admin credentials
"""
usage_mesg = """
usage:
wautils [--debug]
Start up wautils web server
"""
from flask import Flask, redirect, url_for, render_template, flash, g, request, send_file
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, logout_user, current_user, login_required
from flask_bootstrap import Bootstrap
from flask_restful import Resource as FlaskRestResource
from flask_restful import reqparse as FlaskRestReqparse
from flask_restful import Api as FlaskRestAPI
from flask_restful import request as FlaskRestRequest
import WaApi
import urllib
import os,sys,requests
from dotenv import load_dotenv
from oauth import OAuthSignIn
import getopt
import json
import random
import csv
pos_ran_chars = 'abcdefghijknpqrstuvwxyz23456789'
# for debugging
import pprint
ex_code_fail = 1 # used with sys.exit()
ex_code_success = 0
# get keys and config info from .env
load_dotenv()
wa_uri_prefix = "https://api.wildapricot.org/v2.1/"
wa_uri_prefix_accounts = wa_uri_prefix + "Accounts/"
app = Flask(__name__)
app.secret_key = os.environ['FLASK_SECRET_KEY']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['OAUTH_CREDENTIALS'] = {
'wildapricot' : {
'account' : os.environ['WA_ACCOUNT'],
'id' : os.environ['OAUTH_ID'],
'api_key' : os.environ['OAUTH_API_KEY'],
'secret' : os.environ['OAUTH_SECRET']
}
}
# rest api support
restapi = FlaskRestAPI(app)
# bootstrap framework support
Bootstrap(app)
# allow cross-site origin
CORS(app)
# tell bootstrap NOT to fetch from CDNs
app.config['BOOTSTRAP_SERVE_LOCAL'] = True
# login manager setup
lm = LoginManager(app)
lm.login_view = 'utils'
# database setup
db = SQLAlchemy(app)
# for debugging
pp = pprint.PrettyPrinter(stream=sys.stderr)
@lm.user_loader
@app.route('/')
@app.route('/signoffs')
@login_required
@app.route('/events')
@login_required
@app.route('/dump_events')
@login_required
"""
{'AccessLevel': 'Public',
'CheckedInAttendeesNumber': 0,
'ConfirmedRegistrationsCount': 0,
'EndDate': '2021-04-01T00:00:00-04:00',
'EndTimeSpecified': False,
'EventType': 'Regular',
'HasEnabledRegistrationTypes': True,
'Id': 4053381,
'Location': '',
'Name': 'Adams Test',
'PendingRegistrationsCount': 0,
'RegistrationEnabled': True,
'RegistrationsLimit': None,
'StartDate': '2021-04-01T00:00:00-04:00',
'StartTimeSpecified': False,
'Tags': [],
'Url': 'https://api.wildapricot.org/v2.1/accounts/335649/Events/4053381'}
"""
"""
swipe_file = open('/tmp/events.csv','w')
swipe_file_csv = csv.writer(swipe_file)
q = BadgeSwipe.query.order_by(desc(BadgeSwipe.timestamp))
for r in q:
dat = r.to_dict()
ar = []
ar.append(dat['timestamp'])
ar.append(dat['user_name'])
if '.' in dat['card_id']:
# put leading zero back into card id
(fac,id) = dat['card_id'].split('.')
id = '0' + id if len(id) == 4 else id
id = '00' + id if len(id) == 3 else id
id = '000' + id if len(id) == 2 else id
id = '0000' + id if len(id) == 1 else id
dat['card_id'] = fac + id
ar.append(dat['card_id'])
swipe_file_csv.writerow(ar)
swipe_file.close()
return send_file('/tmp/swipes.csv',as_attachment=True,attachment_filename='swipes.csv')
"""
@app.route('/members')
@login_required
@app.route('/utils')
@login_required
@app.route('/logout')
@app.route('/authorize/<provider>')
@app.route('/callback/<provider>')
################################################################################
# REST API STUFF
###
class WAGetAnyEndpointREST(FlaskRestResource):
"""
REST API for our js in the browser to call us on
"""
restapi.add_resource(WAGetAnyEndpointREST,'/api/v1/wa_get_any_endpoint')
def wa_get_any_endpoint_rest():
"""
respond passed endpoint up to WA, return response to requestor
"""
pp.pprint('------wa_get_any_endpoint_rest()--------')
rp = FlaskRestReqparse.RequestParser()
rp.add_argument('endpoint',type=str)
rp.add_argument('$asyncfalse',type=str)
args = rp.parse_args()
wapi,creds = wapi_init()
# browser js doesn't necessarily know our account ID. We add it here
ep = args['endpoint'].replace('$accountid', creds['account'])
# get this user's info
wa_accounts_contact_me = wapi.execute_request(
wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
if wa_accounts_contact_me.IsAccountAdministrator:
# WA account admins get carte blanche to do anything
return wa_execute_request_raw(wapi,wa_uri_prefix + ep)
else:
# non admins get to do only certain things
if (urllib.parse.urlparse(ep).path == 'accounts/' + creds['account'] + '/events/'):
return wa_execute_request_raw(wapi,wa_uri_prefix + ep)
if (urllib.parse.urlparse(ep).path == 'accounts/' + creds['account'] + '/eventregistrations'):
return wa_execute_request_raw(wapi,wa_uri_prefix + ep)
return {"error":1,"error_message":"permision denied"}
###
class WAPutAnyEndpointREST(FlaskRestResource):
"""
REST API for our js in the browser to call us on
"""
restapi.add_resource(WAPutAnyEndpointREST,'/api/v1/wa_put_any_endpoint')
def wa_put_any_endpoint_rest():
"""
send PUT endpoint rq up to WA, return response to requestor
"""
rp = FlaskRestReqparse.RequestParser()
wapi,creds = wapi_init()
rq = FlaskRestRequest.json
ep = rq['endpoint']
pd = rq['put_data']
ep = ep.replace('$accountid', creds['account'])
wa_accounts_contact_me = wapi.execute_request(
wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
if wa_accounts_contact_me.IsAccountAdministrator:
try:
response = wapi.execute_request_raw(wa_uri_prefix + ep, data=pd, method="PUT")
except urllib.error.HTTPError as e:
return {"error":1,"error_message": ep + ':' + str(e) }
except WaApi.ApiException as e:
return {"error":1,"error_message": ep + ':' + str(e) }
decoded = json.loads(response.read().decode())
result = []
if isinstance(decoded, list):
for item in decoded:
result.append(item)
elif isinstance(decoded, dict):
result.append(decoded)
return result
else:
return {"error":1,"error_message":"You are not a WA account admin"}
## end rest stuff
################################################################################
# Execution starts here
if __name__ == '__main__':
# parse cmd line args and perform operations
try:
# parse cmd line args
ops,args = getopt.getopt(sys.argv[1:],"c:",["debug","cmd="])
except getopt.GetoptError as err:
sys.stderr.write(str(err) + '\n')
sys.stderr.write(usage_mesg)
sys.exit(ex_code_fail)
for o,a in ops:
if (o == '--debug'):
db.create_all()
app.run(host='0.0.0.0',port=8080,debug=True)
if (o == '--cmd' or o == '-c'):
cmd = a
wapi,creds = wapi_init()
response = wapi.execute_request_raw(wa_uri_prefix_accounts +
creds['account'] +
"/contacts/?$async=false")
sys.stderr.write(json.dumps( response.read().decode(), indent=4,sort_keys=True))
"""
wapi,creds = wapi_init()
response = wapi.execute_request_raw("https://api.wildapricot.org/v2.1/", method="GET")
"""
sys.exit(ex_code_success)
# run production on local port that apache proxy's to
sys.stderr.write("Starting web server\n")
db.create_all()
app.run(port=7000,debug=False)
# no options given. print usage and exit
sys.stderr.write(usage_mesg)
sys.exit(ex_code_fail)
| 29.073814 | 109 | 0.629874 | #!/usr/bin/env python3
"""
wautils
A set of web-based tools for Wild Apricot Integration
o Accepts your Wild Apricot Credentials via Wild Apricot OAuth
o Determines if you are have Wild Apricot admin credentials
o Give you further access only if you have admin credentials
"""
usage_mesg = """
usage:
wautils [--debug]
Start up wautils web server
"""
from flask import Flask, redirect, url_for, render_template, flash, g, request, send_file
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, logout_user, current_user, login_required
from flask_bootstrap import Bootstrap
from flask_restful import Resource as FlaskRestResource
from flask_restful import reqparse as FlaskRestReqparse
from flask_restful import Api as FlaskRestAPI
from flask_restful import request as FlaskRestRequest
import WaApi
import urllib
import os,sys,requests
from dotenv import load_dotenv
from oauth import OAuthSignIn
import getopt
import json
import random
import csv
pos_ran_chars = 'abcdefghijknpqrstuvwxyz23456789'
# for debugging
import pprint
ex_code_fail = 1 # used with sys.exit()
ex_code_success = 0
# get keys and config info from .env
load_dotenv()
wa_uri_prefix = "https://api.wildapricot.org/v2.1/"
wa_uri_prefix_accounts = wa_uri_prefix + "Accounts/"
app = Flask(__name__)
app.secret_key = os.environ['FLASK_SECRET_KEY']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
app.config['OAUTH_CREDENTIALS'] = {
'wildapricot' : {
'account' : os.environ['WA_ACCOUNT'],
'id' : os.environ['OAUTH_ID'],
'api_key' : os.environ['OAUTH_API_KEY'],
'secret' : os.environ['OAUTH_SECRET']
}
}
# rest api support
restapi = FlaskRestAPI(app)
# bootstrap framework support
Bootstrap(app)
# allow cross-site origin
CORS(app)
# tell bootstrap NOT to fetch from CDNs
app.config['BOOTSTRAP_SERVE_LOCAL'] = True
# login manager setup
lm = LoginManager(app)
lm.login_view = 'utils'
# database setup
db = SQLAlchemy(app)
# for debugging
pp = pprint.PrettyPrinter(stream=sys.stderr)
def wapi_init():
    """Build an authenticated Wild Apricot API client.

    Returns a ``(client, credentials)`` pair, where *credentials* is the
    ``'wildapricot'`` entry of the app's ``OAUTH_CREDENTIALS`` config.
    """
    credentials = app.config['OAUTH_CREDENTIALS']['wildapricot']
    client = WaApi.WaApiClient(credentials['id'], credentials['secret'])
    client.authenticate_with_apikey(credentials['api_key'])
    return client, credentials
class User(UserMixin, db.Model):
    """Flask-Login user row for a logged-in Wild Apricot contact.

    Rows are created on first OAuth login (see oauth_callback); the primary
    key is the Wild Apricot contact id supplied by the OAuth provider.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key = True)           # WA contact id
    first_name = db.Column(db.String(64), nullable = False)
    last_name = db.Column(db.String(64), nullable = False)
    email = db.Column(db.String(64), nullable = True)        # lookup key at login
@lm.user_loader
def load_user(id):
    # Flask-Login callback: map the session's stored user id to a User row.
    return User.query.get(int(id))
@app.route('/')
def index():
    """Landing page: greet logged-in users and fetch their WA contact record.

    Anonymous visitors only get a warning flash; for authenticated users the
    WA contact is stored on ``g`` so the template can inspect it.
    """
    if current_user.is_anonymous:
        # Visitors who have not logged in just get a warning banner.
        flash('You are not logged in','warning')
    else:
        flash('Hi, ' + current_user.first_name + ' !','success')
        # retrieve users credentials
        wapi,creds = wapi_init()
        global g # things in g object can be accessed in jinja templates
        g.wa_accounts_contact_me = wapi.execute_request(
            wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
        if g.wa_accounts_contact_me.IsAccountAdministrator:
            # if they are a WA admin congratulate them
            flash("Congrats ! You are a Wild Apricot Account Administrator",'success')
    return render_template('index.html')
@app.route('/signoffs')
@login_required
def signoffs():
    """Render the signoffs page with the current user's WA contact on ``g``."""
    wapi,creds = wapi_init()
    global g
    # things in g object can be accessed in jinja templates
    g.wa_accounts_contact_me = wapi.execute_request(
        wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
    # render signoff html.
    # see templates/signoff.html to see what happens
    return render_template('signoffs.html')
@app.route('/events')
@login_required
def events():
    """Render the events page with the current user's WA contact on ``g``."""
    # retrieve users credentials
    wapi,creds = wapi_init()
    #
    global g # things in g object can be accessed in jinja templates
    g.wa_accounts_contact_me = wapi.execute_request(
        wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
    return render_template('events.html')
@app.route('/dump_events')
@login_required
def dump_events():
    """Download every Wild Apricot event for the account as 'events.csv'.

    Fetches the account's event list from the WA API and writes one CSV row
    per event, one column per WA event field.
    """
    import tempfile  # local import: only this route needs it

    wapi, creds = wapi_init()
    resp = wapi.execute_request_raw(
        wa_uri_prefix_accounts + creds['account'] + "/events/")
    events = json.loads(resp.read().decode())

    # Single source of truth for the column set: the header row and the data
    # rows are generated from the same list (the original duplicated all 17
    # field names in two hand-written append sequences).
    columns = [
        'AccessLevel', 'CheckedInAttendeesNumber', 'ConfirmedRegistrationsCount',
        'EndDate', 'EndTimeSpecified', 'EventType', 'HasEnabledRegistrationTypes',
        'Id', 'Location', 'Name', 'PendingRegistrationsCount',
        'RegistrationEnabled', 'RegistrationsLimit', 'StartDate',
        'StartTimeSpecified', 'Tags', 'Url',
    ]

    # mkstemp() gives an unpredictable, race-free temp path (the original's
    # guessable '/tmp/events_<10 chars>.csv' was marked "TODO: create unique
    # fn"); the 'with' block guarantees the handle is closed even if a row
    # fails, and newline='' is the csv-module requirement for text files.
    fd, event_file_name = tempfile.mkstemp(prefix='events_', suffix='.csv')
    with os.fdopen(fd, 'w', newline='') as event_file:
        writer = csv.writer(event_file)
        writer.writerow(columns)
        for ev in events['Events']:
            # .get() tolerates an event missing an optional field (empty
            # cell) instead of aborting the whole export with a KeyError.
            writer.writerow([ev.get(col) for col in columns])

    return send_file(event_file_name, as_attachment=True,
                     attachment_filename='events.csv')
"""
{'AccessLevel': 'Public',
'CheckedInAttendeesNumber': 0,
'ConfirmedRegistrationsCount': 0,
'EndDate': '2021-04-01T00:00:00-04:00',
'EndTimeSpecified': False,
'EventType': 'Regular',
'HasEnabledRegistrationTypes': True,
'Id': 4053381,
'Location': '',
'Name': 'Adams Test',
'PendingRegistrationsCount': 0,
'RegistrationEnabled': True,
'RegistrationsLimit': None,
'StartDate': '2021-04-01T00:00:00-04:00',
'StartTimeSpecified': False,
'Tags': [],
'Url': 'https://api.wildapricot.org/v2.1/accounts/335649/Events/4053381'}
"""
"""
swipe_file = open('/tmp/events.csv','w')
swipe_file_csv = csv.writer(swipe_file)
q = BadgeSwipe.query.order_by(desc(BadgeSwipe.timestamp))
for r in q:
dat = r.to_dict()
ar = []
ar.append(dat['timestamp'])
ar.append(dat['user_name'])
if '.' in dat['card_id']:
# put leading zero back into card id
(fac,id) = dat['card_id'].split('.')
id = '0' + id if len(id) == 4 else id
id = '00' + id if len(id) == 3 else id
id = '000' + id if len(id) == 2 else id
id = '0000' + id if len(id) == 1 else id
dat['card_id'] = fac + id
ar.append(dat['card_id'])
swipe_file_csv.writerow(ar)
swipe_file.close()
return send_file('/tmp/swipes.csv',as_attachment=True,attachment_filename='swipes.csv')
"""
@app.route('/members')
@login_required
def members():
    """Render the members page with the current user's WA contact on ``g``."""
    wapi,creds = wapi_init()
    global g
    # things in g object can be accessed in jinja templates
    g.wa_accounts_contact_me = wapi.execute_request(
        wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
    return render_template('members.html')
@app.route('/utils')
@login_required
def utils():
    """Render the utilities page; WA account admins get a congrats flash."""
    # retrieve users credentials
    wapi,creds = wapi_init()
    global g # things in g object can be accessed in jinja templates
    g.wa_accounts_contact_me = wapi.execute_request(
        wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
    if g.wa_accounts_contact_me.IsAccountAdministrator:
        flash("Congrats ! You are a Wild Apricot Account Administrator",'success')
    return render_template('utils.html')
@app.route('/logout')
def logout():
    # End the flask_login session and return to the landing page.
    logout_user()
    return redirect(url_for('index'))
@app.route('/authorize/<provider>')
def oauth_authorize(provider):
    """Start the OAuth flow for *provider*; already-authenticated users skip it."""
    if not current_user.is_anonymous:
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    return oauth.authorize()
@app.route('/callback/<provider>')
def oauth_callback(provider):
    """OAuth redirect target: create/find the local User and log them in.

    The provider calls this once a token has been granted.  On success the
    contact is upserted into the local users table (keyed by WA contact id)
    and logged into flask_login; on failure the provider's message is
    flashed and we bounce back to the landing page.
    """
    if not current_user.is_anonymous:
        # Already authenticated -- nothing to do.
        return redirect(url_for('index'))
    oauth = OAuthSignIn.get_provider(provider)
    me = oauth.callback()
    '''
    oauth.callback() returns:
    {
        'FirstName' : 'John',
        'LastName' : 'Bigbooty',
        'Email' : 'john.bigbooty@gmail.com',
        'DisplayName' : 'Bigbooty, John',
        'Organization' : 'Yoyodyne',
        'MembershipLevel' :
        {
            'Id' : 5059174,
            'Url' : 'https://api.wildapricot.org/v2/accounts/123456/MembershipLevels/1059174',
            'Name' : 'Key (Legacy)'
        },
        'Status' : 'Active',
        'Id' : 90534910,
        'Url' : 'https://api.wildapricot.org/v2/accounts/123456/Contacts/50534910',
        'IsAccountAdministrator' : True,
        'TermsOfUseAccepted' : True
    }
    '''
    if not('Email' in me):
        # No Email key means the provider returned an error payload.
        flash("ERROR oauth_callback(): " + me['Message'],'error')
        return redirect(url_for('index'))
    # is this user in the DB ?
    user = User.query.filter_by(email=me['Email']).first()
    if not user:
        # if not, add them
        user = User(
            first_name = me['FirstName'],
            last_name = me['LastName'],
            email = me['Email'],
            id = me['Id']
        )
        db.session.add(user)
        db.session.commit()
    # officially login them into flask_login system
    login_user(user, True)
    return redirect(url_for('index'))
def wa_get_contacts():
    """Fetch all WA contacts and return them wrapped in <pre> (testing only).

    NOTE(review): wa_accounts_contact_me is fetched but never used here, and
    json.dumps() of a *str* just produces one quoted JSON string -- the
    indent/sort_keys arguments have no effect on a scalar.
    """
    wapi,creds = wapi_init()
    response = wapi.execute_request_raw(wa_uri_prefix_accounts +
        creds['account'] +
        "/contacts/?$async=false")
    wa_accounts_contact_me = wapi.execute_request(
        wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
    return('<pre>' + json.dumps(
        response.read().decode(),
        indent=4,sort_keys=True
    ) + '</pre>')
def wa_execute_request_raw(wapi, ep):
    """Issue a raw GET against WA endpoint *ep* and normalise the JSON reply.

    A list payload is returned as a list of its items, a dict payload is
    wrapped in a one-element list, and anything else yields an empty list.
    On an HTTP or WA API failure an error dict is returned instead.
    """
    try:
        response = wapi.execute_request_raw(ep)
    except (urllib.error.HTTPError, WaApi.ApiException) as exc:
        return {"error": 1, "error_message": ep + ':' + str(exc)}
    payload = json.loads(response.read().decode())
    if isinstance(payload, list):
        return list(payload)
    if isinstance(payload, dict):
        return [payload]
    return []
################################################################################
# REST API STUFF
###
class WAGetAnyEndpointREST(FlaskRestResource):
    """
    REST API for our js in the browser to call us on
    """
    def get(self):
        # Delegate to the module-level handler, which enforces admin checks.
        return wa_get_any_endpoint_rest()
# Expose the resource at /api/v1/wa_get_any_endpoint.
restapi.add_resource(WAGetAnyEndpointREST,'/api/v1/wa_get_any_endpoint')
def wa_get_any_endpoint_rest():
    """
    Proxy a GET for the browser JS: forward the requested WA endpoint and
    return WA's response.  Account admins may hit any endpoint; everyone
    else is restricted to the events / eventregistrations paths.
    """
    pp.pprint('------wa_get_any_endpoint_rest()--------')  # debug trace to stderr
    rp = FlaskRestReqparse.RequestParser()
    rp.add_argument('endpoint',type=str)
    rp.add_argument('$asyncfalse',type=str)
    args = rp.parse_args()
    wapi,creds = wapi_init()
    # browser js doesn't necessarily know our account ID. We add it here
    ep = args['endpoint'].replace('$accountid', creds['account'])
    # get this user's info
    wa_accounts_contact_me = wapi.execute_request(
        wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
    if wa_accounts_contact_me.IsAccountAdministrator:
        # WA account admins get carte blanche to do anything
        return wa_execute_request_raw(wapi,wa_uri_prefix + ep)
    else:
        # non admins get to do only certain things
        if (urllib.parse.urlparse(ep).path == 'accounts/' + creds['account'] + '/events/'):
            return wa_execute_request_raw(wapi,wa_uri_prefix + ep)
        if (urllib.parse.urlparse(ep).path == 'accounts/' + creds['account'] + '/eventregistrations'):
            return wa_execute_request_raw(wapi,wa_uri_prefix + ep)
        # NOTE(review): "permision" typo is part of the API response payload.
        return {"error":1,"error_message":"permision denied"}
###
class WAPutAnyEndpointREST(FlaskRestResource):
    """
    REST API for our js in the browser to call us on
    """
    def put(self):
        # Delegate to the module-level handler, which enforces admin checks.
        return wa_put_any_endpoint_rest()
# Expose the resource at /api/v1/wa_put_any_endpoint.
restapi.add_resource(WAPutAnyEndpointREST,'/api/v1/wa_put_any_endpoint')
def wa_put_any_endpoint_rest():
    """
    Proxy a PUT for the browser JS: forward the endpoint + body from the
    request JSON up to WA and return WA's (normalised) response.
    Only WA account administrators are allowed to PUT anything.
    """
    rp = FlaskRestReqparse.RequestParser()  # NOTE(review): created but never used
    wapi,creds = wapi_init()
    rq = FlaskRestRequest.json
    ep = rq['endpoint']
    pd = rq['put_data']
    # Browser JS doesn't know our account id; substitute it server-side.
    ep = ep.replace('$accountid', creds['account'])
    wa_accounts_contact_me = wapi.execute_request(
        wa_uri_prefix_accounts + creds['account'] + "/contacts/" + str(current_user.id))
    if wa_accounts_contact_me.IsAccountAdministrator:
        try:
            response = wapi.execute_request_raw(wa_uri_prefix + ep, data=pd, method="PUT")
        except urllib.error.HTTPError as e:
            return {"error":1,"error_message": ep + ':' + str(e) }
        except WaApi.ApiException as e:
            return {"error":1,"error_message": ep + ':' + str(e) }
        # Normalise like wa_execute_request_raw: list as-is, dict wrapped,
        # anything else -> empty list.
        decoded = json.loads(response.read().decode())
        result = []
        if isinstance(decoded, list):
            for item in decoded:
                result.append(item)
        elif isinstance(decoded, dict):
            result.append(decoded)
        return result
    else:
        return {"error":1,"error_message":"You are not a WA account admin"}
## end rest stuff
################################################################################
# Execution starts here
if __name__ == '__main__':
    # parse cmd line args and perform operations
    try:
        ops,args = getopt.getopt(sys.argv[1:],"c:",["debug","cmd="])
    except getopt.GetoptError as err:
        sys.stderr.write(str(err) + '\n')
        sys.stderr.write(usage_mesg)
        sys.exit(ex_code_fail)
    for o,a in ops:
        if (o == '--debug'):
            # Debug mode: run the dev server on all interfaces, port 8080.
            db.create_all()
            app.run(host='0.0.0.0',port=8080,debug=True)
        if (o == '--cmd' or o == '-c'):
            # One-shot CLI mode: dump the account's contacts to stderr.
            # NOTE(review): the option value 'a' is stored in 'cmd' but never
            # consulted -- every -c/--cmd currently runs the same dump.
            cmd = a
            wapi,creds = wapi_init()
            response = wapi.execute_request_raw(wa_uri_prefix_accounts +
                creds['account'] +
                "/contacts/?$async=false")
            sys.stderr.write(json.dumps( response.read().decode(), indent=4,sort_keys=True))
            """
            wapi,creds = wapi_init()
            response = wapi.execute_request_raw("https://api.wildapricot.org/v2.1/", method="GET")
            """
            sys.exit(ex_code_success)
    # run production on local port that apache proxy's to
    sys.stderr.write("Starting web server\n")
    db.create_all()
    app.run(port=7000,debug=False)
    # Only reached once the server stops (or with unmatched options).
    sys.stderr.write(usage_mesg)
    sys.exit(ex_code_fail)
| 7,497 | 327 | 364 |
3a0b2e89672653b9a900abc4afcc73eda8e7bed0 | 5,458 | py | Python | status_lights.py | Nwhitten/Status_Check_Build | 36e2859ad9d5115d07ef3f6fc41a0469dc73ede1 | [
"MIT"
] | null | null | null | status_lights.py | Nwhitten/Status_Check_Build | 36e2859ad9d5115d07ef3f6fc41a0469dc73ede1 | [
"MIT"
] | null | null | null | status_lights.py | Nwhitten/Status_Check_Build | 36e2859ad9d5115d07ef3f6fc41a0469dc73ede1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import time
import unicornhat as uh
import requests
import signal
import buttonshim
import subprocess
# Import the library
import pihole as ph
#from blinkt import set_pixel, set_brightness, show, clear
uh.set_layout(uh.PHAT)
#blinkt.set_clear_on_exit()
import subprocess, platform
Cr,Cg,Cb = 0,50,0
Gr,Gg,Gb = 0,70,0
Ar,Ag,Ab = 130,60,0
Rr,Rg,Rb = 255,0,0
RrX,RgX,RbX = 130,0,0
ExternalSource = True
HoleStatus = "enabled"
uh.clear()
while True:
uh.brightness(1)
if pingOk('188.72.89.2'):
#PiHole
PixelNumber =0
PixelRow =0
uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
uh.show()
if website_up('http://192.168.11.125/admin/'):
pihole = ph.PiHole("192.168.11.125")
pihole.refresh()
if pihole.status == "enabled":
uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
else:
uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
uh.show()
if not pihole.status == HoleStatus:
HoleStatus = pihole.status
exec(open("/usr/local/bin/status_check/DHM_update.py").read())
#exec(open("/usr/local/bin/status_check/inky_update.py").read())
else:
uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
uh.show()
#HomeBridge Admin
PixelNumber =1
PixelRow =0
uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
uh.show()
if website_up('http://192.168.11.160:8581/login'):
uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
else:
uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
uh.show()
#CODEX ADMIN
PixelNumber =3
PixelRow =0
uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
uh.show()
if website_up('http://192.168.11.190:3755/cgi-bin/'):
uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
else:
uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
uh.show()
#Codex PLEX
PixelNumber =4
PixelRow =0
uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
uh.show()
if website_up('http://192.168.11.190:32400/web/'):
uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
else:
uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
uh.show()
if ExternalSource == True:
#PiHole
#PixelNumber =0
#PixelRow =2
#uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
#uh.show()
#if website_up('http://192.168.195.112/admin/'):
#pihole = ph.PiHole("192.168.195.112")
#pihole.refresh()
#if pihole.status == "enabled":
#uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
#else:
#uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
#uh.show()
#else:
#uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
#uh.show()
#Jane Pi
PixelNumber =3
PixelRow =2
uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
uh.show()
if pingOk('192.168.195.112'):
uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
else:
uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
uh.show()
#Janie Plus PLEX
PixelNumber =4
PixelRow =2
uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
uh.show()
if website_up('http://192.168.195.112:32400/web/'):
uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
else:
uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
uh.show()
#Temp
#buttonshim.set_pixel(0x94, 0x00, 0xd3)
PixelNumber =7
PixelRow =3
uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
#show()
cmd ="cat /sys/class/thermal/thermal_zone0/temp"
thermal = subprocess.check_output(cmd, shell=True).decode("utf8")
tempC = round(float(thermal) / 1e3,1)
# Change backlight if temp changes
#print(tempC)
if tempC < 40:
uh.set_pixel(PixelNumber,PixelRow, 0,0,50)
#buttonshim.set_pixel(0,0,200)
elif tempC < 50:
uh.set_pixel(PixelNumber,PixelRow, 0,50,0)
#buttonshim.set_pixel(0,200,0)
elif tempC > 60:
uh.set_pixel(PixelNumber,PixelRow, 75,0,0)
#buttonshim.set_pixel(255,0,0)
else:
uh.set_pixel(PixelNumber,PixelRow, 50,50,0)
#buttonshim.set_pixel(200,200,0)
uh.show()
#clear()
#show()
time.sleep(5)
else:
uh.brightness(1)
uh.clear()
uh.set_all(0,0,0)
uh.show()
time.sleep(1)
uh.set_all(0,0,255)
uh.show()
time.sleep(1)
uh.clear()
| 27.846939 | 137 | 0.532429 | #!/usr/bin/env python3
import time
import unicornhat as uh
import requests
import signal
import buttonshim
import subprocess
# Import the library
import pihole as ph
#from blinkt import set_pixel, set_brightness, show, clear
uh.set_layout(uh.PHAT)
#blinkt.set_clear_on_exit()
def website_up(url):
    """Return True if an HTTP GET of *url* succeeds with a non-error status.

    A bounded timeout keeps the status loop from hanging forever on a dead
    host (the original call could block indefinitely), and the handler is
    narrowed from a bare ``except:`` to requests' own failures so that e.g.
    KeyboardInterrupt still propagates.
    """
    try:
        r = requests.get(url, timeout=10)
        return r.ok
    except requests.exceptions.RequestException:
        # Connection refused, DNS failure, timeout, malformed URL -> "down".
        return False
import subprocess, platform
def pingOk(sHost):
    """Return True if a single ICMP ping to *sHost* succeeds.

    Uses the platform-appropriate count flag (-n on Windows, -c elsewhere)
    and invokes ping as an argv list (no shell), so the host string can
    never be interpreted as shell syntax.
    """
    count_flag = '-n' if platform.system().lower() == 'windows' else '-c'
    try:
        subprocess.check_output(['ping', count_flag, '1', sHost])
    except Exception:
        # Non-zero exit (host unreachable), missing ping binary, etc.
        return False
    return True
# RGB triples used by the status loop below.
Cr,Cg,Cb = 0,50,0      # "checking" colour shown while a probe is in flight
Gr,Gg,Gb = 0,70,0      # "up"/good colour (green)
Ar,Ag,Ab = 130,60,0    # amber -- not referenced in the loop below
Rr,Rg,Rb = 255,0,0     # "down" colour (red)
RrX,RgX,RbX = 130,0,0  # dim red -- not referenced in the loop below
ExternalSource = True  # also probe the hosts on the remote 192.168.195.x network
HoleStatus = "enabled" # last observed Pi-hole status; a change triggers DHM_update.py
uh.clear()
# Main status loop: every pass first checks WAN reachability, then probes
# each service and paints one Unicorn HAT pixel per service (checking ->
# Cr/Cg/Cb, up -> Gr/Gg/Gb, down -> Rr/Rg/Rb).  If the WAN gateway is
# unreachable the whole matrix flashes blue instead.
while True:
    uh.brightness(1)
    if pingOk('188.72.89.2'):
        #PiHole
        PixelNumber =0
        PixelRow =0
        uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
        uh.show()
        if website_up('http://192.168.11.125/admin/'):
            pihole = ph.PiHole("192.168.11.125")
            pihole.refresh()
            if pihole.status == "enabled":
                uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
            else:
                uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
            uh.show()
            # On a status transition, run the external display updater.
            if not pihole.status == HoleStatus:
                HoleStatus = pihole.status
                exec(open("/usr/local/bin/status_check/DHM_update.py").read())
                #exec(open("/usr/local/bin/status_check/inky_update.py").read())
        else:
            uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
            uh.show()
        #HomeBridge Admin
        PixelNumber =1
        PixelRow =0
        uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
        uh.show()
        if website_up('http://192.168.11.160:8581/login'):
            uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
        else:
            uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
        uh.show()
        #CODEX ADMIN
        PixelNumber =3
        PixelRow =0
        uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
        uh.show()
        if website_up('http://192.168.11.190:3755/cgi-bin/'):
            uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
        else:
            uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
        uh.show()
        #Codex PLEX
        PixelNumber =4
        PixelRow =0
        uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
        uh.show()
        if website_up('http://192.168.11.190:32400/web/'):
            uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
        else:
            uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
        uh.show()
        # Remote-site probes (row 2) are gated on the ExternalSource flag.
        if ExternalSource == True:
            #PiHole
            #PixelNumber =0
            #PixelRow =2
            #uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
            #uh.show()
            #if website_up('http://192.168.195.112/admin/'):
            #pihole = ph.PiHole("192.168.195.112")
            #pihole.refresh()
            #if pihole.status == "enabled":
            #uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
            #else:
            #uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
            #uh.show()
            #else:
            #uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
            #uh.show()
            #Jane Pi
            PixelNumber =3
            PixelRow =2
            uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
            uh.show()
            if pingOk('192.168.195.112'):
                uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
            else:
                uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
            uh.show()
            #Janie Plus PLEX
            PixelNumber =4
            PixelRow =2
            uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
            uh.show()
            if website_up('http://192.168.195.112:32400/web/'):
                uh.set_pixel(PixelNumber,PixelRow, Gr,Gg,Gb)
            else:
                uh.set_pixel(PixelNumber,PixelRow, Rr,Rg,Rb)
            uh.show()
        # CPU temperature indicator: blue <40C, green <50C, red >60C,
        # yellow otherwise.
        #buttonshim.set_pixel(0x94, 0x00, 0xd3)
        PixelNumber =7
        PixelRow =3
        uh.set_pixel(PixelNumber,PixelRow,Cr,Cg,Cb)
        #show()
        cmd ="cat /sys/class/thermal/thermal_zone0/temp"
        thermal = subprocess.check_output(cmd, shell=True).decode("utf8")
        tempC = round(float(thermal) / 1e3,1)  # millidegrees -> degrees C
        #print(tempC)
        if tempC < 40:
            uh.set_pixel(PixelNumber,PixelRow, 0,0,50)
            #buttonshim.set_pixel(0,0,200)
        elif tempC < 50:
            uh.set_pixel(PixelNumber,PixelRow, 0,50,0)
            #buttonshim.set_pixel(0,200,0)
        elif tempC > 60:
            uh.set_pixel(PixelNumber,PixelRow, 75,0,0)
            #buttonshim.set_pixel(255,0,0)
        else:
            uh.set_pixel(PixelNumber,PixelRow, 50,50,0)
            #buttonshim.set_pixel(200,200,0)
        uh.show()
        #clear()
        #show()
        time.sleep(5)
    else:
        # WAN gateway unreachable: flash the whole matrix blue once, then retry.
        uh.brightness(1)
        uh.clear()
        uh.set_all(0,0,0)
        uh.show()
        time.sleep(1)
        uh.set_all(0,0,255)
        uh.show()
        time.sleep(1)
        uh.clear()
| 301 | 0 | 45 |
6b5c9bf45ceb56541227c7a5b4ad744db34b61e7 | 3,953 | py | Python | rstudio_fargate/rstudio/ses/ses_custom_resource_handler.py | aws-samples/aws-fargate-with-rstudio-open-source | 67029f300b5fbf011e78e3f245f6332afa29dbc6 | [
"MIT-0"
] | 56 | 2021-06-22T21:51:37.000Z | 2022-03-16T08:09:30.000Z | rstudio_fargate/rstudio/ses/ses_custom_resource_handler.py | aws-samples/aws-fargate-with-rstudio-open-source | 67029f300b5fbf011e78e3f245f6332afa29dbc6 | [
"MIT-0"
] | 4 | 2021-11-17T14:04:25.000Z | 2021-12-05T08:13:51.000Z | rstudio_fargate/rstudio/ses/ses_custom_resource_handler.py | aws-samples/aws-fargate-with-rstudio-open-source | 67029f300b5fbf011e78e3f245f6332afa29dbc6 | [
"MIT-0"
] | 9 | 2021-06-22T23:04:55.000Z | 2022-02-04T03:50:34.000Z | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This script creates the lambda function handler for the lambda function sending emails
to users via SES
"""
import boto3
import logging as log
import random
import string
import cfnresponse
from html import escape
log.getLogger().setLevel(log.INFO)
| 36.266055 | 87 | 0.625601 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
OFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
This script creates the lambda function handler for the lambda function sending emails
to users via SES
"""
import boto3
import logging as log
import random
import string
import cfnresponse
from html import escape
log.getLogger().setLevel(log.INFO)
def id_generator(size, chars=string.ascii_lowercase + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return "".join(picked)
def main(event, context):
    """Lambda entry point for the CloudFormation custom resource.

    On Create: reads the email settings from ResourceProperties, fetches the
    password from Secrets Manager, substitutes it into the message body and
    sends the email via SES.  On Update/Delete there is nothing to undo, so
    SUCCESS is signalled immediately.  Any exception is reported back to
    CloudFormation as FAILED so the stack does not hang waiting for a reply.
    """
    # Random physical resource id, regenerated on every invocation.
    physical_id = "%s.%s" % (id_generator(6), id_generator(16))
    print(event)
    try:
        log.info("Input event: %s", event)
        # Check if this is a Create and we're failing Creates
        if event["RequestType"] == "Create" and event["ResourceProperties"].get(
            "FailCreate", False
        ):
            raise RuntimeError("Create failure requested")
        if event["RequestType"] in ["Create"]:
            client = boto3.client("ses")
            sm_client = boto3.client("secretsmanager")
            email_from = event["ResourceProperties"]["EmailFrom"]
            email_to = event["ResourceProperties"]["EmailTo"]
            subject = event["ResourceProperties"]["Subject"]
            message = event["ResourceProperties"]["Message"]
            secret_arn = event["ResourceProperties"]["SecretArn"]
            sresponse = sm_client.get_secret_value(SecretId=secret_arn)
            # HTML-escape the secret before splicing it into the message,
            # since the message is also rendered as an HTML body.
            message = message.replace("<password>", escape(sresponse["SecretString"]))
            response = send_email(email_from, email_to, subject, message)
            attributes = {"Response": response}
            cfnresponse.send(
                event, context, cfnresponse.SUCCESS, attributes, physical_id
            )
        if event["RequestType"] in ["Delete", "Update"]:
            attributes = {"Response": "Delete/update performed"}
            cfnresponse.send(
                event, context, cfnresponse.SUCCESS, attributes, physical_id
            )
    except Exception as e:
        log.exception(e)
        cfnresponse.send(event, context, cfnresponse.FAILED, {}, physical_id)
def send_email(email_from, email_to, subject, message):
    """Send *message* via SES as both plain-text and HTML; return the SES response."""
    ses = boto3.client("ses")
    destination = {"ToAddresses": [email_to]}
    body = {
        "Text": {"Data": get_text_content(message), "Charset": "UTF-8"},
        "Html": {"Data": get_html_content(message), "Charset": "UTF-8"},
    }
    payload = {
        "Subject": {"Data": subject, "Charset": "UTF-8"},
        "Body": body,
    }
    return ses.send_email(
        Source=email_from,
        Destination=destination,
        Message=payload,
        ReplyToAddresses=["no-reply@test.com"],
    )
def get_html_content(message):
    """Wrap *message* in the HTML e-mail template.

    The caller is responsible for HTML-escaping *message* (main() escapes the
    secret before substitution).
    """
    return f"""
    <html>
        <body>
            <h1>Good day,</h1>
            <p style="font-size:18px">{message}</p>
        </body>
    </html>
    """
def get_text_content(message):
    """Return *message* unchanged as the plain-text e-mail body."""
    return message
| 2,574 | 0 | 125 |
e6b1d28814a329b1f8be57f21014aa4dad907500 | 152 | py | Python | ex030.py | GuilhermeDallari/desafiomuundo1 | df4e06393970d2b686622d13d0ef433b761164fd | [
"MIT"
] | null | null | null | ex030.py | GuilhermeDallari/desafiomuundo1 | df4e06393970d2b686622d13d0ef433b761164fd | [
"MIT"
] | null | null | null | ex030.py | GuilhermeDallari/desafiomuundo1 | df4e06393970d2b686622d13d0ef433b761164fd | [
"MIT"
] | null | null | null | n1 = int(input('digite um numero: '))
r = n1 % 2
if r == 1:
print('O numero {} é IMPAR!'.format(n1))
else:
print('O numero {} é PAR'.format(n1)) | 25.333333 | 44 | 0.565789 | n1 = int(input('digite um numero: '))
r = n1 % 2
# A remainder of zero after division by two means the number is even.
if r == 0:
    print('O numero {} é PAR'.format(n1))
else:
    print('O numero {} é IMPAR!'.format(n1))
402932507d958cc74d1592134bb29af38bec7576 | 243 | py | Python | tests/unit/sample.py | BTruer/fastapi-prod-starter | 1b2ae5b1a2b0ec6424af4145cae4f64df8d9a457 | [
"MIT"
] | 2 | 2021-07-07T12:33:42.000Z | 2022-02-13T12:33:28.000Z | api/tests/unit/sample.py | poltavski/CalorieCounter | 24763364bbdbd803233521954e8b2584f97edf39 | [
"Apache-2.0"
] | null | null | null | api/tests/unit/sample.py | poltavski/CalorieCounter | 24763364bbdbd803233521954e8b2584f97edf39 | [
"Apache-2.0"
] | 1 | 2020-11-16T20:07:24.000Z | 2020-11-16T20:07:24.000Z | """A sample test."""
class TestSample:
    """Example unit-test class demonstrating the basic pytest layout."""
    def test_setup(self) -> None:
        """Verify the test scaffolding runs."""
        return None
    def test_one(self) -> None:
        """Assert a tautology."""
        assert True
| 17.357143 | 35 | 0.534979 | """A sample test."""
class TestSample:
    """Minimal unit-test class used as a pytest smoke check."""
    def test_setup(self) -> None:
        """Confirm setup completes without error."""
        return None
    def test_one(self) -> None:
        """A trivially true assertion."""
        assert True
| 0 | 0 | 0 |
0496b5f5f443a63f183e282ddbd4d5fbf13594bd | 718 | py | Python | core/dynamic/Exec.py | GeubsikLang/GeubsikPy | 2cf4d59234b733e78a61c3af638264f973bc30fe | [
"MIT"
] | 4 | 2019-01-19T06:09:01.000Z | 2019-12-22T01:31:37.000Z | core/dynamic/Exec.py | GeubsikLang/GeubsikPy | 2cf4d59234b733e78a61c3af638264f973bc30fe | [
"MIT"
] | null | null | null | core/dynamic/Exec.py | GeubsikLang/GeubsikPy | 2cf4d59234b733e78a61c3af638264f973bc30fe | [
"MIT"
] | null | null | null | import os
import platform
import time
| 24.758621 | 74 | 0.591922 | import os
import platform
import time
class Interpret(object):
    """Executes translated Python source and logs any runtime error to a file."""
    def __init__(self, filename: str):
        """Remember the source *filename*; error logs are written to ``<filename>.log``."""
        # Interpreter version, recorded for reference (not used elsewhere in this chunk).
        self.py_version = platform.python_version()
        self.filename = filename
        # Last source string handed to exec(); None until exec() is called.
        self.py_string = None
    def exec(self, py_string: str):
        """Compile and run *py_string*; on any exception, write it to the log and open it."""
        # noinspection PyPep8Naming,NonAsciiCharacters
        # compile() mode string; identifier kept in the project's language (Korean).
        급식어컴파일러인부분 = "exec"
        self.py_string = py_string
        try:
            exec(compile(self.py_string, "급식급식", 급식어컴파일러인부분))
        except Exception as err:
            time.sleep(0.1)
            print("런타임 에러는 너굴맨이 처리했으니 안심하라구!")
            # Persist the error message, then open the log file via the OS.
            # NOTE(review): os.system shell-executes the filename -- confirm
            # filenames are trusted before relying on this.
            with open(self.filename + ".log", "w", encoding="utf8") as el:
                el.write(str(err))
            os.system(self.filename + ".log")
| 688 | 3 | 76 |
56c101ae9fe7521ee7dc2434734541a2b1b6c9b7 | 2,649 | py | Python | PFERD/utils.py | ff781/PFERD | ba9215ebe81e67940f88c52eb1a42b2dc480661b | [
"MIT"
] | null | null | null | PFERD/utils.py | ff781/PFERD | ba9215ebe81e67940f88c52eb1a42b2dc480661b | [
"MIT"
] | null | null | null | PFERD/utils.py | ff781/PFERD | ba9215ebe81e67940f88c52eb1a42b2dc480661b | [
"MIT"
] | null | null | null | """
A few utility bobs and bits.
"""
import re
from pathlib import Path, PurePath
from typing import Optional, Tuple, Union
import bs4
import requests
from .progress import ProgressSettings, progress_for, size_from_headers
PathLike = Union[PurePath, str, Tuple[str, ...]]
def to_path(pathlike: PathLike) -> Path:
    """
    Convert a given PathLike into a Path.

    Tuples are unpacked into path segments; strings and PurePaths are passed
    straight to the Path constructor.
    """
    if not isinstance(pathlike, tuple):
        return Path(pathlike)
    return Path(*pathlike)
Regex = Union[str, re.Pattern]
def to_pattern(regex: Regex) -> re.Pattern:
    """
    Convert a regex to a re.Pattern.

    Already-compiled patterns are returned unchanged.
    """
    if not isinstance(regex, re.Pattern):
        return re.compile(regex)
    return regex
def soupify(response: requests.Response) -> bs4.BeautifulSoup:
    """
    Wrap a requests response in a bs4 object.

    The body is parsed with Python's built-in "html.parser" backend, so no
    extra parser dependency (lxml/html5lib) is required.
    """
    return bs4.BeautifulSoup(response.text, "html.parser")
def stream_to_path(
        response: requests.Response,
        target: Path,
        progress_name: Optional[str] = None,
        chunk_size: int = 1024 ** 2
) -> None:
    """
    Download a requests response content to a file by streaming it. This
    function avoids excessive memory usage when downloading large files. The
    chunk_size is in bytes.
    If progress_name is None, no progress bar will be shown. Otherwise a progress
    bar will appear, if the download is bigger than an internal threshold
    (10 MiB, and only when the server sent a Content-Length header).
    """
    # 'with response' ensures the underlying connection is released even if
    # writing to disk fails part-way through.
    with response:
        length = size_from_headers(response)
        if progress_name and length and int(length) > 1024 * 1024 * 10: # 10 MiB
            settings: Optional[ProgressSettings] = ProgressSettings(progress_name, length)
        else:
            settings = None
        with open(target, 'wb') as file_descriptor:
            with progress_for(settings) as progress:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    file_descriptor.write(chunk)
                    progress.advance(len(chunk))
def prompt_yes_no(question: str, default: Optional[bool] = None) -> bool:
    """
    Ask the user a yes/no question on stdin and return their answer.

    An empty reply selects *default* (when one is given); unrecognised input
    re-prompts until a valid yes/no answer is typed.
    """
    if default is None:
        hint = "[y/n]"
    else:
        hint = "[Y/n]" if default else "[y/N]"
    text = f"{question} {hint} "
    wrong_reply = "Please reply with 'yes'/'y' or 'no'/'n'."
    while True:
        answer = input(text).strip().lower()
        if answer in ("yes", "ye", "y"):
            return True
        if answer in ("no", "n"):
            return False
        if not answer and default is not None:
            return default
        print(wrong_reply)
| 26.757576 | 90 | 0.627029 | """
A few utility bobs and bits.
"""
import re
from pathlib import Path, PurePath
from typing import Optional, Tuple, Union
import bs4
import requests
from .progress import ProgressSettings, progress_for, size_from_headers
PathLike = Union[PurePath, str, Tuple[str, ...]]
def to_path(pathlike: PathLike) -> Path:
"""
Convert a given PathLike into a Path.
"""
if isinstance(pathlike, tuple):
return Path(*pathlike)
return Path(pathlike)
Regex = Union[str, re.Pattern]
def to_pattern(regex: Regex) -> re.Pattern:
"""
Convert a regex to a re.Pattern.
"""
if isinstance(regex, re.Pattern):
return regex
return re.compile(regex)
def soupify(response: requests.Response) -> bs4.BeautifulSoup:
"""
Wrap a requests response in a bs4 object.
"""
return bs4.BeautifulSoup(response.text, "html.parser")
def stream_to_path(
response: requests.Response,
target: Path,
progress_name: Optional[str] = None,
chunk_size: int = 1024 ** 2
) -> None:
"""
Download a requests response content to a file by streaming it. This
function avoids excessive memory usage when downloading large files. The
chunk_size is in bytes.
If progress_name is None, no progress bar will be shown. Otherwise a progress
bar will appear, if the download is bigger than an internal threshold.
"""
with response:
length = size_from_headers(response)
if progress_name and length and int(length) > 1024 * 1024 * 10: # 10 MiB
settings: Optional[ProgressSettings] = ProgressSettings(progress_name, length)
else:
settings = None
with open(target, 'wb') as file_descriptor:
with progress_for(settings) as progress:
for chunk in response.iter_content(chunk_size=chunk_size):
file_descriptor.write(chunk)
progress.advance(len(chunk))
def prompt_yes_no(question: str, default: Optional[bool] = None) -> bool:
"""
Prompts the user a yes/no question and returns their choice.
"""
if default is True:
prompt = "[Y/n]"
elif default is False:
prompt = "[y/N]"
else:
prompt = "[y/n]"
text = f"{question} {prompt} "
wrong_reply = "Please reply with 'yes'/'y' or 'no'/'n'."
while True:
response = input(text).strip().lower()
if response in {"yes", "ye", "y"}:
return True
if response in {"no", "n"}:
return False
if response == "" and default is not None:
return default
print(wrong_reply)
| 0 | 0 | 0 |
c42420842bea41ba4380c348d076140bce021469 | 33 | py | Python | src/scraper/__init__.py | laujamie/algo-trading-bot | 2591ed9c0aa803bb77547db28ef0d529ff9a029f | [
"MIT"
] | 13 | 2020-07-23T15:57:56.000Z | 2022-01-13T22:56:04.000Z | src/scraper/__init__.py | laujamie/algo-trading-bot | 2591ed9c0aa803bb77547db28ef0d529ff9a029f | [
"MIT"
] | 2 | 2021-03-31T20:38:22.000Z | 2021-12-13T20:36:54.000Z | src/scraper/__init__.py | laujamie/algo-trading-bot | 2591ed9c0aa803bb77547db28ef0d529ff9a029f | [
"MIT"
] | 3 | 2020-12-15T09:07:41.000Z | 2022-02-07T19:03:51.000Z | from .tickers import get_tickers
| 16.5 | 32 | 0.848485 | from .tickers import get_tickers
| 0 | 0 | 0 |
21698d26b2be15c167c979d47b10597d56dd1c9d | 69 | py | Python | examples/pyminifier/source/hello_world.py | sleeyax/PyDeobfuscator | cffc6cb6ac1fea8e9c99d8501c8b95e93272279f | [
"MIT"
] | 10 | 2020-02-13T19:26:38.000Z | 2021-12-13T08:28:04.000Z | examples/pyminifier/source/hello_world.py | sleeyax/PyDeobfuscator | cffc6cb6ac1fea8e9c99d8501c8b95e93272279f | [
"MIT"
] | null | null | null | examples/pyminifier/source/hello_world.py | sleeyax/PyDeobfuscator | cffc6cb6ac1fea8e9c99d8501c8b95e93272279f | [
"MIT"
] | 3 | 2020-10-03T18:43:50.000Z | 2021-04-28T14:12:48.000Z | msg = "hello"
target = "world"
print("{0} {1} !".format(msg, target)) | 23 | 38 | 0.594203 | msg = "hello"
target = "world"
print("{0} {1} !".format(msg, target)) | 0 | 0 | 0 |
6a260dbe0bee13a0aa6700d7b1a39090542319c3 | 12,868 | py | Python | mmtbx/hydrogens/reduce_hydrogen.py | TiankunZhou/cctbx_project | 373f302f00c12d7239f8e37e3165e62bc1d852cc | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/hydrogens/reduce_hydrogen.py | TiankunZhou/cctbx_project | 373f302f00c12d7239f8e37e3165e62bc1d852cc | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/hydrogens/reduce_hydrogen.py | TiankunZhou/cctbx_project | 373f302f00c12d7239f8e37e3165e62bc1d852cc | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
import six
import sys
import mmtbx.model
import iotbx.pdb
import boost_adaptbx.boost.python as bp
from libtbx.utils import null_out
from libtbx import group_args
from cctbx.array_family import flex
from collections import OrderedDict
#
from cctbx.maptbx.box import shift_and_box_model
ext = bp.import_ext("cctbx_geometry_restraints_ext")
# ==============================================================================
# ==============================================================================
class place_hydrogens():
  '''
  Add H atoms to a model
  Parameters
  ----------
  use_neutron_distances : bool
    use neutron distances instead of X-ray
  adp_scale : float
    scale factor for isotropic B of H atoms.
    B(H-atom) = adp_scale * B(parent non-H atom)
  keep_existing_H : bool
    keep existing H atoms in model, only place missing H
  After run(), self.model holds the hydrogenated model; n_H_initial/n_H_final
  record H counts before/after, and no_H_placed_mlq, site_labels_disulfides
  and site_labels_no_para list atoms/residues that were skipped (see show()).
  '''
  # ------------------------------------------------------------------------------
  # ------------------------------------------------------------------------------
  def run(self):
    '''
    Function that places H atoms
    '''
    model_has_bogus_cs = False
    # TODO temporary fix until the code is moved to model class
    # check if box cussion of 5 A is enough to prevent symm contacts
    cs = self.model.crystal_symmetry()
    if cs is None:
      self.model = shift_and_box_model(model = self.model)
      model_has_bogus_cs = True
    # Remove existing H if requested
    self.n_H_initial = self.model.get_hd_selection().count(True)
    if not self.keep_existing_H:
      self.model = self.model.select(~self.model.get_hd_selection())
    # Add H atoms and place them at center of coordinates
    pdb_hierarchy = self.add_missing_H_atoms_at_bogus_position()
    pdb_hierarchy.atoms().reset_serial()
    #pdb_hierarchy.sort_atoms_in_place()
    p = mmtbx.model.manager.get_default_pdb_interpretation_params()
    p.pdb_interpretation.clash_guard.nonbonded_distance_threshold=None
    p.pdb_interpretation.use_neutron_distances = self.use_neutron_distances
    p.pdb_interpretation.proceed_with_excessive_length_bonds=True
    #p.pdb_interpretation.restraints_library.cdl=False # XXX this triggers a bug !=360
    # Rebuild the model manager (with restraints) from the hydrogenated hierarchy.
    ro = self.model.get_restraint_objects()
    self.model = mmtbx.model.manager(
      model_input = None,
      pdb_hierarchy = pdb_hierarchy,
      build_grm = True,
      stop_for_unknowns = self.stop_for_unknowns,
      crystal_symmetry = self.model.crystal_symmetry(),
      restraint_objects = ro,
      pdb_interpretation_params = p,
      log = null_out())
    #f = open("intermediate1.pdb","w")
    #f.write(self.model.model_as_pdb())
    # Only keep H that have been parameterized in riding H procedure
    sel_h = self.model.get_hd_selection()
    if sel_h.count(True) == 0:
      return
    # get rid of isolated H atoms.
    #For example when heavy atom is missing, H needs not to be placed
    sel_isolated = self.model.isolated_atoms_selection()
    self.sel_lone_H = sel_h & sel_isolated
    self.model = self.model.select(~self.sel_lone_H)
    # get riding H manager --> parameterize all H atoms
    sel_h = self.model.get_hd_selection()
    self.model.setup_riding_h_manager(use_ideal_dihedral = True)
    sel_h_in_para = flex.bool(
      [bool(x) for x in self.model.riding_h_manager.h_parameterization])
    # H atoms present in the model but absent from the riding parameterization.
    sel_h_not_in_para = sel_h_in_para.exclusive_or(sel_h)
    # Human-readable labels of those unparameterized H atoms (for show()).
    self.site_labels_no_para = [atom.id_str().replace('pdb=','').replace('"','')
      for atom in self.model.get_hierarchy().atoms().select(sel_h_not_in_para)]
    #
    self.model = self.model.select(~sel_h_not_in_para)
    self.exclude_H_on_disulfides()
    #self.exclude_h_on_coordinated_S()
    # f = open("intermediate2.pdb","w")
    # f.write(model.model_as_pdb())
    # Reset occupancies, ADPs and idealize H atom positions
    self.model.reset_adp_for_hydrogens(scale = self.adp_scale)
    self.model.reset_occupancy_for_hydrogens_simple()
    self.model.idealize_h_riding()
    self.exclude_h_on_coordinated_S()
    #
    self.n_H_final = self.model.get_hd_selection().count(True)
  # ------------------------------------------------------------------------------
  def add_missing_H_atoms_at_bogus_position(self):
    '''
    ! this changes hierarchy in place !
    Add missing H atoms to a pdb_hierarchy object
    all H atoms will be at center of coordinates (all of them superposed)
    Parameters
    ----------
    pdb_hierarchy : cctbx hierarchy object
      pdb_hierarchy to which missing H atoms will be added
    '''
    pdb_hierarchy = self.model.get_hierarchy()
    mon_lib_srv = self.model.get_mon_lib_srv()
    #XXX This breaks for 1jxt, residue 2, TYR
    get_class = iotbx.pdb.common_residue_names_get_class
    no_H_placed_resnames = list()
    for m in pdb_hierarchy.models():
      for chain in m.chains():
        for rg in chain.residue_groups():
          n_atom_groups = len(rg.atom_groups())
          for ag in rg.atom_groups():
            # Skip blank-altloc groups in 3-conformer residue groups.
            if n_atom_groups == 3 and ag.altloc == '':
              continue
            #print list(ag.atoms().extract_name())
            if(get_class(name=ag.resname) == "common_water"): continue
            actual = [a.name.strip().upper() for a in ag.atoms()]
            #
            mlq = mon_lib_query(residue=ag, mon_lib_srv=mon_lib_srv)
            #if (get_class(name=ag.resname) in ['modified_rna_dna', 'other']):
            # No monomer-library definition: record the residue and move on.
            if mlq is None:
              self.no_H_placed_mlq.append(ag.resname)
              continue
            expected_h = list()
            for k, v in six.iteritems(mlq.atom_dict()):
              if(v.type_symbol=="H"): expected_h.append(k)
            missing_h = list(set(expected_h).difference(set(actual)))
            if 0: print(ag.resname, missing_h)
            # All new H start at the group's centroid; real positions come later.
            new_xyz = ag.atoms().extract_xyz().mean()
            hetero = ag.atoms()[0].hetero
            for mh in missing_h:
              # TODO: this should be probably in a central place
              if len(mh) < 4: mh = (' ' + mh).ljust(4)
              a = (iotbx.pdb.hierarchy.atom()
                .set_name(new_name=mh)
                .set_element(new_element="H")
                .set_xyz(new_xyz=new_xyz)
                .set_hetero(new_hetero=hetero))
              ag.append_atom(a)
    return pdb_hierarchy
  # ------------------------------------------------------------------------------
  # ------------------------------------------------------------------------------
  # ------------------------------------------------------------------------------
  def show(self, log):
    '''
    Informative output
    log : file-like stream or None; defaults to sys.stdout when None.
    '''
    if log is None: log = sys.stdout
    #
    if (not self.keep_existing_H and self.n_H_initial):
      msg = 'Number of hydrogen atoms trimmed from input model: %s \n'
      print(msg % self.n_H_initial, file=log)
    #
    msg = 'Number of hydrogen atoms added to the input model: %s \n'
    print(msg % self.n_H_final, file=log)
    #
    if self.no_H_placed_mlq:
      msg = '''
No H atoms were placed on the following residues because no restraints
were found:'''
      print(msg, file=log)
      for resname in self.no_H_placed_mlq:
        print(resname, file=log)
    #
    if self.site_labels_disulfides:
      msg = '''
The following cysteine HG atoms were not placed because the sulfur atom is
involved in a disulfide bond'''
      print(msg, file=log)
      for label in self.site_labels_disulfides:
        print(label, file=log)
    #
    if self.site_labels_no_para:
      msg = '''
The following H atoms were not placed because they could not be parameterized
(not enough restraints information)'''
      print(msg, file=log)
      for label in self.site_labels_no_para:
        print(label)
  # ------------------------------------------------------------------------------
# ==============================================================================
# stub for reduce parameters
# TODO can be parameters or phil, depending on how many options are really needed
reduce_master_params_str = """
flip_NQH = True
.type = bool
.help = add H and rotate and flip NQH groups
search_time_limit = 600
.type = int
.help = max seconds to spend in exhaustive search (default=600)
"""
def optimize(model):
  """
  Carry out reduce optimization
  Parameters
  ----------
  model
      mmtbx model object that contains H atoms
      H atoms should be at appropriate distances
  Returns
  -------
  model
      mmtbx model object with optimized H atoms
  NOTE: this is currently a stub -- it returns the model unchanged; the
  hierarchy and restraints-manager lookups below are placeholders for the
  future implementation.
  """
  # hierarchy object --> has hierarchy of structure
  pdb_hierarchy = model.get_hierarchy()
  # geometry restraints manager --> info about ideal bonds, angles; what atoms are bonded, etc.
  grm = model.get_restraints_manager()
  print("Reduce optimization happens here")
  return model
| 37.083573 | 96 | 0.626515 | from __future__ import absolute_import, division, print_function
import six
import sys
import mmtbx.model
import iotbx.pdb
import boost_adaptbx.boost.python as bp
from libtbx.utils import null_out
from libtbx import group_args
from cctbx.array_family import flex
from collections import OrderedDict
#
from cctbx.maptbx.box import shift_and_box_model
ext = bp.import_ext("cctbx_geometry_restraints_ext")
# ==============================================================================
def mon_lib_query(residue, mon_lib_srv):
  """Return the monomer-library definition (comp_comp_id) for *residue*,
  or None if the library does not know the residue name."""
  # 'ani' (atom-name interpretation) is returned by the server call but not
  # used here.
  md, ani = mon_lib_srv.get_comp_comp_id_and_atom_name_interpretation(
    residue_name=residue.resname,
    atom_names=residue.atoms().extract_name())
  return md
#  print(md)
#  print(ani)
#  get_func = getattr(mon_lib_srv, "get_comp_comp_id", None)
#  if (get_func is not None): return get_func(comp_id=residue)
#  return mon_lib_srv.get_comp_comp_id_direct(comp_id=residue)
# ==============================================================================
class place_hydrogens():
'''
Add H atoms to a model
Parameters
----------
use_neutron_distances : bool
use neutron distances instead of X-ray
adp_scale : float
scale factor for isotropic B of H atoms.
B(H-atom) = adp_scale * B(parent non-H atom)
keep_existing_H : bool
keep existing H atoms in model, only place missing H
'''
# ------------------------------------------------------------------------------
def __init__(self,
model,
use_neutron_distances = False,
adp_scale = 1,
exclude_water = True,
stop_for_unknowns = False,
keep_existing_H = False):
self.model = model
self.use_neutron_distances = use_neutron_distances
self.adp_scale = adp_scale
self.exclude_water = exclude_water
self.stop_for_unknowns = stop_for_unknowns
self.keep_existing_H = keep_existing_H
#
self.no_H_placed_mlq = list()
self.site_labels_disulfides = list()
self.site_labels_no_para = list()
self.n_H_initial = 0
self.n_H_final = 0
# ------------------------------------------------------------------------------
def run(self):
'''
Function that places H atoms
'''
model_has_bogus_cs = False
# TODO temporary fix until the code is moved to model class
# check if box cussion of 5 A is enough to prevent symm contacts
cs = self.model.crystal_symmetry()
if cs is None:
self.model = shift_and_box_model(model = self.model)
model_has_bogus_cs = True
# Remove existing H if requested
self.n_H_initial = self.model.get_hd_selection().count(True)
if not self.keep_existing_H:
self.model = self.model.select(~self.model.get_hd_selection())
# Add H atoms and place them at center of coordinates
pdb_hierarchy = self.add_missing_H_atoms_at_bogus_position()
pdb_hierarchy.atoms().reset_serial()
#pdb_hierarchy.sort_atoms_in_place()
p = mmtbx.model.manager.get_default_pdb_interpretation_params()
p.pdb_interpretation.clash_guard.nonbonded_distance_threshold=None
p.pdb_interpretation.use_neutron_distances = self.use_neutron_distances
p.pdb_interpretation.proceed_with_excessive_length_bonds=True
#p.pdb_interpretation.restraints_library.cdl=False # XXX this triggers a bug !=360
ro = self.model.get_restraint_objects()
self.model = mmtbx.model.manager(
model_input = None,
pdb_hierarchy = pdb_hierarchy,
build_grm = True,
stop_for_unknowns = self.stop_for_unknowns,
crystal_symmetry = self.model.crystal_symmetry(),
restraint_objects = ro,
pdb_interpretation_params = p,
log = null_out())
#f = open("intermediate1.pdb","w")
#f.write(self.model.model_as_pdb())
# Only keep H that have been parameterized in riding H procedure
sel_h = self.model.get_hd_selection()
if sel_h.count(True) == 0:
return
# get rid of isolated H atoms.
#For example when heavy atom is missing, H needs not to be placed
sel_isolated = self.model.isolated_atoms_selection()
self.sel_lone_H = sel_h & sel_isolated
self.model = self.model.select(~self.sel_lone_H)
# get riding H manager --> parameterize all H atoms
sel_h = self.model.get_hd_selection()
self.model.setup_riding_h_manager(use_ideal_dihedral = True)
sel_h_in_para = flex.bool(
[bool(x) for x in self.model.riding_h_manager.h_parameterization])
sel_h_not_in_para = sel_h_in_para.exclusive_or(sel_h)
self.site_labels_no_para = [atom.id_str().replace('pdb=','').replace('"','')
for atom in self.model.get_hierarchy().atoms().select(sel_h_not_in_para)]
#
self.model = self.model.select(~sel_h_not_in_para)
self.exclude_H_on_disulfides()
#self.exclude_h_on_coordinated_S()
# f = open("intermediate2.pdb","w")
# f.write(model.model_as_pdb())
# Reset occupancies, ADPs and idealize H atom positions
self.model.reset_adp_for_hydrogens(scale = self.adp_scale)
self.model.reset_occupancy_for_hydrogens_simple()
self.model.idealize_h_riding()
self.exclude_h_on_coordinated_S()
#
self.n_H_final = self.model.get_hd_selection().count(True)
# ------------------------------------------------------------------------------
def add_missing_H_atoms_at_bogus_position(self):
'''
! this changes hierarchy in place !
Add missing H atoms to a pdb_hierarchy object
all H atoms will be at center of coordinates (all of them superposed)
Parameters
----------
pdb_hierarchy : cctbx hierarchy object
pdb_hierarchy to which missing H atoms will be added
'''
pdb_hierarchy = self.model.get_hierarchy()
mon_lib_srv = self.model.get_mon_lib_srv()
#XXX This breaks for 1jxt, residue 2, TYR
get_class = iotbx.pdb.common_residue_names_get_class
no_H_placed_resnames = list()
for m in pdb_hierarchy.models():
for chain in m.chains():
for rg in chain.residue_groups():
n_atom_groups = len(rg.atom_groups())
for ag in rg.atom_groups():
if n_atom_groups == 3 and ag.altloc == '':
continue
#print list(ag.atoms().extract_name())
if(get_class(name=ag.resname) == "common_water"): continue
actual = [a.name.strip().upper() for a in ag.atoms()]
#
mlq = mon_lib_query(residue=ag, mon_lib_srv=mon_lib_srv)
#if (get_class(name=ag.resname) in ['modified_rna_dna', 'other']):
if mlq is None:
self.no_H_placed_mlq.append(ag.resname)
continue
expected_h = list()
for k, v in six.iteritems(mlq.atom_dict()):
if(v.type_symbol=="H"): expected_h.append(k)
missing_h = list(set(expected_h).difference(set(actual)))
if 0: print(ag.resname, missing_h)
new_xyz = ag.atoms().extract_xyz().mean()
hetero = ag.atoms()[0].hetero
for mh in missing_h:
# TODO: this should be probably in a central place
if len(mh) < 4: mh = (' ' + mh).ljust(4)
a = (iotbx.pdb.hierarchy.atom()
.set_name(new_name=mh)
.set_element(new_element="H")
.set_xyz(new_xyz=new_xyz)
.set_hetero(new_hetero=hetero))
ag.append_atom(a)
return pdb_hierarchy
# ------------------------------------------------------------------------------
def exclude_H_on_disulfides(self):
rm = self.model.get_restraints_manager()
bond_proxies_simple, asu = rm.geometry.get_all_bond_proxies(
sites_cart = self.model.get_sites_cart())
elements = self.model.get_hierarchy().atoms().extract_element()
ss_i_seqs = []
all_proxies = [p for p in bond_proxies_simple]
for proxy in asu:
all_proxies.append(proxy)
for proxy in all_proxies:
if( isinstance(proxy, ext.bond_simple_proxy)): i,j=proxy.i_seqs
elif(isinstance(proxy, ext.bond_asu_proxy)): i,j=proxy.i_seq,proxy.j_seq
else: assert 0 # never goes here
if([elements[i],elements[j]].count("S")==2): # XXX may be coordinated if metal edits used
ss_i_seqs.extend([i,j])
sel_remove = flex.size_t()
for proxy in all_proxies:
if( isinstance(proxy, ext.bond_simple_proxy)): i,j=proxy.i_seqs
elif(isinstance(proxy, ext.bond_asu_proxy)): i,j=proxy.i_seq,proxy.j_seq
else: assert 0 # never goes here
if(elements[i] in ["H","D"] and j in ss_i_seqs): sel_remove.append(i)
if(elements[j] in ["H","D"] and i in ss_i_seqs): sel_remove.append(j)
#
sl_disulfides = [atom.id_str().replace('pdb=','').replace('"','')
for atom in self.model.get_hierarchy().atoms().select(sel_remove)]
self.site_labels_disulfides = list(OrderedDict.fromkeys(sl_disulfides))
self.model = self.model.select(~flex.bool(self.model.size(), sel_remove))
# ------------------------------------------------------------------------------
  def exclude_h_on_coordinated_S(self): # XXX if edits used it should be like in exclude_h_on_SS
    '''
    Remove H/D atoms bonded to sulfurs that look metal-coordinated.

    A sulfur is considered "possibly coordinated" when it has a nonbonded
    contact to an atom outside the usual covalent partners (exclusion_list).
    Replaces self.model with a selection that excludes those hydrogens.
    '''
    rm = self.model.get_restraints_manager().geometry
    elements = self.model.get_hierarchy().atoms().extract_element()
    # Find possibly coordinated S
    exclusion_list = ["H","D","T","S","O","P","N","C","SE"]
    sel_s = []
    for proxy in rm.pair_proxies().nonbonded_proxies.simple:
      i,j = proxy.i_seqs
      if(elements[i] == "S" and not elements[j] in exclusion_list): sel_s.append(i)
      if(elements[j] == "S" and not elements[i] in exclusion_list): sel_s.append(j)
    # Find H attached to possibly coordinated S
    bond_proxies_simple, asu = rm.get_all_bond_proxies(
      sites_cart = self.model.get_sites_cart())
    sel_remove = flex.size_t()
    for proxy in bond_proxies_simple:
      i,j = proxy.i_seqs
      if(elements[i] in ["H","D"] and j in sel_s): sel_remove.append(i)
      if(elements[j] in ["H","D"] and i in sel_s): sel_remove.append(j)
    # Strip the collected hydrogens from the model.
    self.model = self.model.select(~flex.bool(self.model.size(), sel_remove))
# ------------------------------------------------------------------------------
def show(self, log):
'''
Informative output
'''
if log is None: log = sys.stdout
#
if (not self.keep_existing_H and self.n_H_initial):
msg = 'Number of hydrogen atoms trimmed from input model: %s \n'
print(msg % self.n_H_initial, file=log)
#
msg = 'Number of hydrogen atoms added to the input model: %s \n'
print(msg % self.n_H_final, file=log)
#
if self.no_H_placed_mlq:
msg = '''
No H atoms were placed on the following residues because no restraints
were found:'''
print(msg, file=log)
for resname in self.no_H_placed_mlq:
print(resname, file=log)
#
if self.site_labels_disulfides:
msg = '''
The following cysteine HG atoms were not placed because the sulfur atom is
involved in a disulfide bond'''
print(msg, file=log)
for label in self.site_labels_disulfides:
print(label, file=log)
#
if self.site_labels_no_para:
msg = '''
The following H atoms were not placed because they could not be parameterized
(not enough restraints information)'''
print(msg, file=log)
for label in self.site_labels_no_para:
print(label)
# ------------------------------------------------------------------------------
  def get_model(self):
    '''Return the (possibly trimmed) mmtbx model object.'''
    return self.model
  def get_counts(self):
    '''
    Return a group_args summary of the run: final H count, residues with no
    restraints, disulfide-excluded labels and unparameterized-atom labels.
    '''
    return group_args(
      number_h_final = self.n_H_final,
      no_H_placed_mlq = self.no_H_placed_mlq,
      site_labels_disulfides = self.site_labels_disulfides,
      site_labels_no_para = self.site_labels_no_para)
# ==============================================================================
# stub for reduce parameters
# TODO can be parameters or phil, depending on how many options are really needed
reduce_master_params_str = """
flip_NQH = True
.type = bool
.help = add H and rotate and flip NQH groups
search_time_limit = 600
.type = int
.help = max seconds to spend in exhaustive search (default=600)
"""
def optimize(model):
  """Run reduce-style optimization on a model that already contains H atoms.

  Parameters
  ----------
  model
      mmtbx model object with H atoms at appropriate distances.

  Returns
  -------
  model
      mmtbx model object with optimized H atoms (currently a stub that
      returns the input unchanged).
  """
  # Atom hierarchy of the structure.
  hierarchy = model.get_hierarchy()
  # Restraints manager: ideal bonds/angles and connectivity information.
  restraints = model.get_restraints_manager()
  print("Reduce optimization happens here")
  return model
| 3,823 | 0 | 148 |
3d17970c0c659e800676554673a5275b1a642224 | 1,585 | py | Python | gberatings/history/views.py | georgeeldredge/gberatings | a0d5137075c7bec09555abcc2faf73939e57cd87 | [
"MIT"
] | null | null | null | gberatings/history/views.py | georgeeldredge/gberatings | a0d5137075c7bec09555abcc2faf73939e57cd87 | [
"MIT"
] | null | null | null | gberatings/history/views.py | georgeeldredge/gberatings | a0d5137075c7bec09555abcc2faf73939e57cd87 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render_to_response
from django.template import RequestContext
from history.models import Teamindex
#from history.models import Seasonindex
from history.models import Seasondata
| 31.078431 | 105 | 0.764669 | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render_to_response
from django.template import RequestContext
from history.models import Teamindex
#from history.models import Seasonindex
from history.models import Seasondata
def index(request):
    """Render the landing page with every team, ordered alphabetically."""
    context = RequestContext(request)
    # All teams, sorted by name, feed the index template.
    teams = Teamindex.objects.order_by('teamname')
    return render_to_response('index.html', {'teams': teams}, context)
def history_report(request, team_name):
    """Render season history for *team_name*; show a message if unknown."""
    context = RequestContext(request)
    if not Teamindex.objects.filter(teamname=team_name):
        # Unknown team: render the template with an error message only.
        message = "Team is nonexistant"
        team_info = ''
        team_data = ''
    else:
        message = ''
        team_info = Teamindex.objects.get(teamname=team_name)
        # Every completed season row for this team.
        team_data = Seasondata.objects.filter(teamid=team_info.id)
        for season in team_data:
            # Scale the stored rating up for display and trim precision.
            season.weekfinalrating = round(season.weekfinalrating * 10000, 1)
            season.weekfinalsos = round(season.weekfinalsos, 5)
    context_dict = {'teamname': team_name, 'teaminfo': team_info,
                    'message': message, 'teamdata': team_data}
    return render_to_response('history.html', context_dict, context)
| 1,268 | 0 | 46 |
076aaece575282bf0fb3436ef9daab5a06bb37ea | 96,186 | py | Python | tencentcloud/dts/v20180330/models.py | qin5506/tencentcloud-sdk-python | e9c59d80beabf75fb96456bb8d7a53400346fe9a | [
"Apache-2.0"
] | null | null | null | tencentcloud/dts/v20180330/models.py | qin5506/tencentcloud-sdk-python | e9c59d80beabf75fb96456bb8d7a53400346fe9a | [
"Apache-2.0"
] | null | null | null | tencentcloud/dts/v20180330/models.py | qin5506/tencentcloud-sdk-python | e9c59d80beabf75fb96456bb8d7a53400346fe9a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class ActivateSubscribeRequest(AbstractModel):
    """ActivateSubscribe request structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID.
        :type SubscribeId: str
        :param InstanceId: Database instance ID.
        :type InstanceId: str
        :param SubscribeObjectType: Subscription type: 0 - whole instance, 1 - data, 2 - structure, 3 - data + structure.
        :type SubscribeObjectType: int
        :param Objects: Objects to subscribe to.
        :type Objects: :class:`tencentcloud.dts.v20180330.models.SubscribeObject`
        :param UniqSubnetId: Subnet hosting the subscription service; defaults to the subnet of the database instance.
        :type UniqSubnetId: str
        :param Vport: Subscription service port; defaults to 7507.
        :type Vport: int
        """
        self.SubscribeId = None
        self.InstanceId = None
        self.SubscribeObjectType = None
        self.Objects = None
        self.UniqSubnetId = None
        self.Vport = None
class ActivateSubscribeResponse(AbstractModel):
    """ActivateSubscribe response structure.
    """
    def __init__(self):
        """
        :param AsyncRequestId: ID of the subscription-configuration task.
        :type AsyncRequestId: str
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.AsyncRequestId = None
        self.RequestId = None
class CompleteMigrateJobRequest(AbstractModel):
    """CompleteMigrateJob request structure.
    """
    def __init__(self):
        """
        :param JobId: Data migration task ID.
        :type JobId: str
        """
        self.JobId = None
class CompleteMigrateJobResponse(AbstractModel):
    """CompleteMigrateJob response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class ConsistencyParams(AbstractModel):
    """Sampling parameters for spot-check consistency verification.
    """
    def __init__(self):
        """
        :param SelectRowsPerTable: Data-content check: percentage of rows per table sampled for comparison. Integer in [1, 100].
        :type SelectRowsPerTable: int
        :param TablesSelectAll: Data-content check: percentage of migrated tables subject to content checking. Integer in [1, 100].
        :type TablesSelectAll: int
        :param TablesSelectCount: Row-count check: percentage of migrated tables whose row counts are compared. Integer in [1, 100].
        :type TablesSelectCount: int
        """
        self.SelectRowsPerTable = None
        self.TablesSelectAll = None
        self.TablesSelectCount = None
class CreateMigrateCheckJobRequest(AbstractModel):
    """CreateMigrateCheckJob request structure.
    """
    def __init__(self):
        """
        :param JobId: Data migration task ID.
        :type JobId: str
        """
        self.JobId = None
class CreateMigrateCheckJobResponse(AbstractModel):
    """CreateMigrateCheckJob response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class CreateMigrateJobRequest(AbstractModel):
    """CreateMigrateJob request structure.
    """
    def __init__(self):
        """
        :param JobName: Name of the data migration task.
        :type JobName: str
        :param MigrateOption: Migration task configuration options.
        :type MigrateOption: :class:`tencentcloud.dts.v20180330.models.MigrateOption`
        :param SrcDatabaseType: Source database type; currently mysql, redis, mongodb, postgresql, mariadb, percona. See the console's migration-creation page for per-region support.
        :type SrcDatabaseType: str
        :param SrcAccessType: Source access type: extranet (public network), cvm (self-built on CVM), dcg (Direct Connect), vpncloud (cloud VPN), cdb (TencentDB instance), ccn (Cloud Connect Network).
        :type SrcAccessType: str
        :param SrcInfo: Source instance information; contents depend on the migration task type.
        :type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SrcInfo`
        :param DstDatabaseType: Target database type; currently mysql, redis, mongodb, postgresql, mariadb, percona. See the console's migration-creation page for per-region support.
        :type DstDatabaseType: str
        :param DstAccessType: Target access type; currently only cdb (TencentDB instance).
        :type DstAccessType: str
        :param DstInfo: Target instance information.
        :type DstInfo: :class:`tencentcloud.dts.v20180330.models.DstInfo`
        :param DatabaseInfo: Source tables to migrate, as a JSON string. Required when MigrateOption.MigrateObject is 2 (migrate selected tables).
For a two-level database-table structure:
[{Database:db1,Table:[table1,table2]},{Database:db2}]
For a three-level database-schema-table structure:
[{Database:db1,Schema:s1
Table:[table1,table2]},{Database:db1,Schema:s2
Table:[table1,table2]},{Database:db2,Schema:s1
Table:[table1,table2]},{Database:db3},{Database:db4
Schema:s1}]
        :type DatabaseInfo: str
        """
        self.JobName = None
        self.MigrateOption = None
        self.SrcDatabaseType = None
        self.SrcAccessType = None
        self.SrcInfo = None
        self.DstDatabaseType = None
        self.DstAccessType = None
        self.DstInfo = None
        self.DatabaseInfo = None
class CreateMigrateJobResponse(AbstractModel):
    """CreateMigrateJob response structure.
    """
    def __init__(self):
        """
        :param JobId: Data migration task ID.
        :type JobId: str
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.JobId = None
        self.RequestId = None
class CreateSubscribeRequest(AbstractModel):
    """CreateSubscribe request structure.
    """
    def __init__(self):
        """
        :param Product: Database type to subscribe to; currently only mysql.
        :type Product: str
        :param PayType: Billing mode: 1 - pay-per-hour, 0 - monthly subscription.
        :type PayType: int
        :param Duration: Purchase duration in months; required when PayType is 0. Maximum 120.
        :type Duration: int
        :param Count: Number of instances to purchase; defaults to 1, maximum 10.
        :type Count: int
        :param AutoRenew: Auto-renew flag; 0 (default) - no auto renew, 1 - auto renew. Ignored for hourly-billed instances.
        :type AutoRenew: int
        :param Tags: Resource tags for the instance.
        :type Tags: list of TagItem
        """
        self.Product = None
        self.PayType = None
        self.Duration = None
        self.Count = None
        self.AutoRenew = None
        self.Tags = None
class CreateSubscribeResponse(AbstractModel):
    """CreateSubscribe response structure.
    """
    def __init__(self):
        """
        :param SubscribeIds: IDs of the created data-subscription instances.
Note: this field may return null, meaning no valid value was obtained.
        :type SubscribeIds: list of str
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.SubscribeIds = None
        self.RequestId = None
class CreateSyncCheckJobRequest(AbstractModel):
    """CreateSyncCheckJob request structure.
    """
    def __init__(self):
        """
        :param JobId: Disaster-recovery sync task ID.
        :type JobId: str
        """
        self.JobId = None
class CreateSyncCheckJobResponse(AbstractModel):
    """CreateSyncCheckJob response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class CreateSyncJobRequest(AbstractModel):
    """CreateSyncJob request structure.
    """
    def __init__(self):
        """
        :param JobName: Name of the disaster-recovery sync task.
        :type JobName: str
        :param SyncOption: Sync task configuration options.
        :type SyncOption: :class:`tencentcloud.dts.v20180330.models.SyncOption`
        :param SrcDatabaseType: Source database type; currently only mysql.
        :type SrcDatabaseType: str
        :param SrcAccessType: Source access type; currently only cdb (TencentDB instance).
        :type SrcAccessType: str
        :param SrcInfo: Source instance information.
        :type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SyncInstanceInfo`
        :param DstDatabaseType: Target database type; currently only mysql.
        :type DstDatabaseType: str
        :param DstAccessType: Target access type; currently only cdb (TencentDB instance).
        :type DstAccessType: str
        :param DstInfo: Target instance information.
        :type DstInfo: :class:`tencentcloud.dts.v20180330.models.SyncInstanceInfo`
        :param DatabaseInfo: Source tables to sync, as a JSON string.
For a two-level database-table structure:
[{Database:db1,Table:[table1,table2]},{Database:db2}]
        :type DatabaseInfo: str
        """
        self.JobName = None
        self.SyncOption = None
        self.SrcDatabaseType = None
        self.SrcAccessType = None
        self.SrcInfo = None
        self.DstDatabaseType = None
        self.DstAccessType = None
        self.DstInfo = None
        self.DatabaseInfo = None
class CreateSyncJobResponse(AbstractModel):
    """CreateSyncJob response structure.
    """
    def __init__(self):
        """
        :param JobId: Disaster-recovery sync task ID.
        :type JobId: str
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.JobId = None
        self.RequestId = None
class DeleteMigrateJobRequest(AbstractModel):
    """DeleteMigrateJob request structure.
    """
    def __init__(self):
        """
        :param JobId: Data migration task ID.
        :type JobId: str
        """
        self.JobId = None
class DeleteMigrateJobResponse(AbstractModel):
    """DeleteMigrateJob response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class DeleteSyncJobRequest(AbstractModel):
    """DeleteSyncJob request structure.
    """
    def __init__(self):
        """
        :param JobId: ID of the disaster-recovery sync task to delete.
        :type JobId: str
        """
        self.JobId = None
class DeleteSyncJobResponse(AbstractModel):
    """DeleteSyncJob response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class DescribeAsyncRequestInfoRequest(AbstractModel):
    """DescribeAsyncRequestInfo request structure.
    """
    def __init__(self):
        """
        :param AsyncRequestId: Async task ID.
        :type AsyncRequestId: str
        """
        self.AsyncRequestId = None
class DescribeAsyncRequestInfoResponse(AbstractModel):
    """DescribeAsyncRequestInfo response structure.
    """
    def __init__(self):
        """
        :param Info: Task execution result information.
        :type Info: str
        :param Status: Task execution status; one of: success, failed, running.
        :type Status: str
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.Info = None
        self.Status = None
        self.RequestId = None
class DescribeMigrateCheckJobRequest(AbstractModel):
    """DescribeMigrateCheckJob request structure.
    """
    def __init__(self):
        """
        :param JobId: Data migration task ID.
        :type JobId: str
        """
        self.JobId = None
class DescribeMigrateCheckJobResponse(AbstractModel):
    """DescribeMigrateCheckJob response structure.
    """
    def __init__(self):
        """
        :param Status: Check task status: unavailable (currently unavailable), starting, running, finished.
        :type Status: str
        :param ErrorCode: Task error code.
        :type ErrorCode: int
        :param ErrorMessage: Task error message.
        :type ErrorMessage: str
        :param Progress: Overall check progress; e.g. "30" means 30%.
        :type Progress: str
        :param CheckFlag: Check verdict: 0 - failed, 1 - passed, 3 - not checked.
        :type CheckFlag: int
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.Status = None
        self.ErrorCode = None
        self.ErrorMessage = None
        self.Progress = None
        self.CheckFlag = None
        self.RequestId = None
class DescribeMigrateJobsRequest(AbstractModel):
    """DescribeMigrateJobs request structure.
    """
    def __init__(self):
        """
        :param JobId: Data migration task ID.
        :type JobId: str
        :param JobName: Data migration task name.
        :type JobName: str
        :param Order: Sort field; one of JobId, Status, JobName, MigrateType, RunMode, CreateTime.
        :type Order: str
        :param OrderSeq: Sort order: ASC (ascending) or DESC (descending).
        :type OrderSeq: str
        :param Offset: Offset; defaults to 0.
        :type Offset: int
        :param Limit: Number of instances to return; defaults to 20, valid range [1, 100].
        :type Limit: int
        """
        self.JobId = None
        self.JobName = None
        self.Order = None
        self.OrderSeq = None
        self.Offset = None
        self.Limit = None
class DescribeMigrateJobsResponse(AbstractModel):
    """DescribeMigrateJobs response structure.
    """
    def __init__(self):
        """
        :param TotalCount: Number of tasks.
        :type TotalCount: int
        :param JobList: Array of task details.
        :type JobList: list of MigrateJobInfo
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.JobList = None
        self.RequestId = None
class DescribeRegionConfRequest(AbstractModel):
    """DescribeRegionConf request structure (takes no parameters).
    """
class DescribeRegionConfResponse(AbstractModel):
    """DescribeRegionConf response structure.
    """
    def __init__(self):
        """
        :param TotalCount: Number of purchasable regions.
        :type TotalCount: int
        :param Items: Details of the purchasable regions.
        :type Items: list of SubscribeRegionConf
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.Items = None
        self.RequestId = None
class DescribeSubscribeConfRequest(AbstractModel):
    """DescribeSubscribeConf request structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID.
        :type SubscribeId: str
        """
        self.SubscribeId = None
class DescribeSubscribeConfResponse(AbstractModel):
    """DescribeSubscribeConf response structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID.
        :type SubscribeId: str
        :param SubscribeName: Subscription instance name.
        :type SubscribeName: str
        :param ChannelId: Subscription channel.
        :type ChannelId: str
        :param Product: Subscribed database type.
        :type Product: str
        :param InstanceId: Subscribed instance.
        :type InstanceId: str
        :param InstanceStatus: Status of the subscribed instance; one of: running, offline, isolate.
        :type InstanceStatus: str
        :param SubsStatus: Configuration status: unconfigure - not configured, configuring - in progress, configured - done.
        :type SubsStatus: str
        :param Status: Lifecycle status: normal, isolating, isolated, offlining.
        :type Status: str
        :param CreateTime: Creation time of the subscription instance.
        :type CreateTime: str
        :param IsolateTime: Time at which the instance was isolated.
        :type IsolateTime: str
        :param ExpireTime: Expiration time of the subscription instance.
        :type ExpireTime: str
        :param OfflineTime: Time at which the instance was taken offline.
        :type OfflineTime: str
        :param ConsumeStartTime: Starting point in time for consuming subscription data.
        :type ConsumeStartTime: str
        :param PayType: Billing mode: 1 - pay-per-hour, 0 - monthly subscription.
        :type PayType: int
        :param Vip: Subscription channel VIP.
        :type Vip: str
        :param Vport: Subscription channel port.
        :type Vport: int
        :param UniqVpcId: VPC of the subscription channel.
        :type UniqVpcId: str
        :param UniqSubnetId: Subnet of the subscription channel.
        :type UniqSubnetId: str
        :param SdkConsumedTime: Current SDK consumption checkpoint.
        :type SdkConsumedTime: str
        :param SdkHost: Subscription SDK IP address.
        :type SdkHost: str
        :param SubscribeObjectType: Subscription type: 0 - whole instance, 1 - DDL data, 2 - DML structure, 3 - DDL data + DML structure.
        :type SubscribeObjectType: int
        :param SubscribeObjects: Subscribed objects; empty array when SubscribeObjectType is 0.
        :type SubscribeObjects: list of SubscribeObject
        :param ModifyTime: Last modification time.
        :type ModifyTime: str
        :param Region: Region.
        :type Region: str
        :param Tags: Tags of the subscription instance.
Note: this field may return null, meaning no valid value was obtained.
        :type Tags: list of TagItem
        :param AutoRenewFlag: Auto-renew flag: 0 - no auto renew, 1 - auto renew.
Note: this field may return null, meaning no valid value was obtained.
        :type AutoRenewFlag: int
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.SubscribeId = None
        self.SubscribeName = None
        self.ChannelId = None
        self.Product = None
        self.InstanceId = None
        self.InstanceStatus = None
        self.SubsStatus = None
        self.Status = None
        self.CreateTime = None
        self.IsolateTime = None
        self.ExpireTime = None
        self.OfflineTime = None
        self.ConsumeStartTime = None
        self.PayType = None
        self.Vip = None
        self.Vport = None
        self.UniqVpcId = None
        self.UniqSubnetId = None
        self.SdkConsumedTime = None
        self.SdkHost = None
        self.SubscribeObjectType = None
        self.SubscribeObjects = None
        self.ModifyTime = None
        self.Region = None
        self.Tags = None
        self.AutoRenewFlag = None
        self.RequestId = None
class DescribeSubscribesRequest(AbstractModel):
    """DescribeSubscribes request structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Data-subscription instance ID.
        :type SubscribeId: str
        :param SubscribeName: Data-subscription instance name.
        :type SubscribeName: str
        :param InstanceId: ID of the bound database instance.
        :type InstanceId: str
        :param ChannelId: Channel ID of the subscription instance.
        :type ChannelId: str
        :param PayType: Billing-mode filter: 0 - monthly subscription, 1 - pay-per-use.
        :type PayType: str
        :param Product: Subscribed database product, e.g. mysql.
        :type Product: str
        :param Status: Instance status: creating, normal, isolating, isolated, offlining.
        :type Status: list of str
        :param SubsStatus: Configuration status: unconfigure - not configured, configuring - in progress, configured - done.
        :type SubsStatus: list of str
        :param Offset: Starting offset of returned records.
        :type Offset: int
        :param Limit: Number of records returned per call.
        :type Limit: int
        :param OrderDirection: Sort order, "DESC" or "ASC"; defaults to "DESC" (newest first by creation time).
        :type OrderDirection: str
        :param TagFilters: Tag filter conditions.
        :type TagFilters: list of TagFilter
        :param SubscribeVersion: Subscription instance edition: txdts - legacy data subscription, kafka - Kafka edition.
        :type SubscribeVersion: str
        """
        self.SubscribeId = None
        self.SubscribeName = None
        self.InstanceId = None
        self.ChannelId = None
        self.PayType = None
        self.Product = None
        self.Status = None
        self.SubsStatus = None
        self.Offset = None
        self.Limit = None
        self.OrderDirection = None
        self.TagFilters = None
        self.SubscribeVersion = None
class DescribeSubscribesResponse(AbstractModel):
    """DescribeSubscribes response structure.
    """
    def __init__(self):
        """
        :param TotalCount: Total number of instances matching the query.
        :type TotalCount: int
        :param Items: Information list of data-subscription instances.
        :type Items: list of SubscribeInfo
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.Items = None
        self.RequestId = None
class DescribeSyncCheckJobRequest(AbstractModel):
    """DescribeSyncCheckJob request structure.
    """
    def __init__(self):
        """
        :param JobId: ID of the disaster-recovery sync task to query.
        :type JobId: str
        """
        self.JobId = None
class DescribeSyncCheckJobResponse(AbstractModel):
    """DescribeSyncCheckJob response structure.
    """
    def __init__(self):
        """
        :param Status: Check task status: starting, running, finished.
        :type Status: str
        :param ErrorCode: Check result code.
        :type ErrorCode: int
        :param ErrorMessage: Informational message.
        :type ErrorMessage: str
        :param StepInfo: Descriptions of the task's execution steps.
        :type StepInfo: list of SyncCheckStepInfo
        :param CheckFlag: Check flag: 0 - not yet passed, 1 - passed.
        :type CheckFlag: int
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.Status = None
        self.ErrorCode = None
        self.ErrorMessage = None
        self.StepInfo = None
        self.CheckFlag = None
        self.RequestId = None
class DescribeSyncJobsRequest(AbstractModel):
    """DescribeSyncJobs request structure.
    """
    def __init__(self):
        """
        :param JobId: Disaster-recovery sync task ID.
        :type JobId: str
        :param JobName: Disaster-recovery sync task name.
        :type JobName: str
        :param Order: Sort field; one of JobId, Status, JobName, CreateTime.
        :type Order: str
        :param OrderSeq: Sort order: ASC (ascending) or DESC (descending).
        :type OrderSeq: str
        :param Offset: Offset; defaults to 0.
        :type Offset: int
        :param Limit: Number of instances to return; defaults to 20, valid range [1, 100].
        :type Limit: int
        """
        self.JobId = None
        self.JobName = None
        self.Order = None
        self.OrderSeq = None
        self.Offset = None
        self.Limit = None
class DescribeSyncJobsResponse(AbstractModel):
    """DescribeSyncJobs response structure.
    """
    def __init__(self):
        """
        :param TotalCount: Number of tasks.
        :type TotalCount: int
        :param JobList: Array of task details.
        :type JobList: list of SyncJobInfo
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.TotalCount = None
        self.JobList = None
        self.RequestId = None
class DstInfo(AbstractModel):
    """Target instance information; contents depend on the migration task type.
    """
    def __init__(self):
        """
        :param InstanceId: Target instance ID, e.g. cdb-jd92ijd8.
        :type InstanceId: str
        :param Region: Target instance region, e.g. ap-guangzhou.
        :type Region: str
        :param Ip: Target instance VIP. Deprecated; no need to fill in.
        :type Ip: str
        :param Port: Target instance vport. Deprecated; no need to fill in.
        :type Port: int
        :param ReadOnly: MySQL only. For whole-instance migration: 1 - read-only, 0 - read-write.
        :type ReadOnly: int
        :param User: Target database account.
        :type User: str
        :param Password: Target database password.
        :type Password: str
        """
        self.InstanceId = None
        self.Region = None
        self.Ip = None
        self.Port = None
        self.ReadOnly = None
        self.User = None
        self.Password = None
class ErrorInfo(AbstractModel):
    """Migration task error information and hints.
    """
    def __init__(self):
        """
        :param ErrorLog: Concrete error log, including error code and message.
        :type ErrorLog: str
        :param HelpDoc: URL of the help document for this error.
        :type HelpDoc: str
        """
        self.ErrorLog = None
        self.HelpDoc = None
class IsolateSubscribeRequest(AbstractModel):
    """IsolateSubscribe request structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID.
        :type SubscribeId: str
        """
        self.SubscribeId = None
class IsolateSubscribeResponse(AbstractModel):
    """IsolateSubscribe response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class MigrateDetailInfo(AbstractModel):
    """Detailed description of the migration process.
    """
    def __init__(self):
        """
        :param StepAll: Total number of steps.
        :type StepAll: int
        :param StepNow: Current step.
        :type StepNow: int
        :param Progress: Overall progress, e.g. "10".
        :type Progress: str
        :param CurrentStepProgress: Progress of the current step, e.g. "1".
        :type CurrentStepProgress: str
        :param MasterSlaveDistance: Master-replica lag in MB; valid during incremental sync; currently supported for redis and mysql.
        :type MasterSlaveDistance: int
        :param SecondsBehindMaster: Master-replica lag in seconds; valid during incremental sync; currently supported for mysql.
        :type SecondsBehindMaster: int
        :param StepInfo: Step information.
        :type StepInfo: list of MigrateStepDetailInfo
        """
        self.StepAll = None
        self.StepNow = None
        self.Progress = None
        self.CurrentStepProgress = None
        self.MasterSlaveDistance = None
        self.SecondsBehindMaster = None
        self.StepInfo = None
class MigrateJobInfo(AbstractModel):
    """Migration task details.
    """
    def __init__(self):
        """
        :param JobId: Data migration task ID.
        :type JobId: str
        :param JobName: Data migration task name.
        :type JobName: str
        :param MigrateOption: Migration task configuration options.
        :type MigrateOption: :class:`tencentcloud.dts.v20180330.models.MigrateOption`
        :param SrcDatabaseType: Source database type: mysql, redis, mongodb, postgresql, mariadb, percona.
        :type SrcDatabaseType: str
        :param SrcAccessType: Source access type: extranet (public network), cvm (self-built on CVM), dcg (Direct Connect), vpncloud (cloud VPN), cdb (TencentDB instance), ccn (Cloud Connect Network).
        :type SrcAccessType: str
        :param SrcInfo: Source instance information; contents depend on the migration task type.
        :type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SrcInfo`
        :param DstDatabaseType: Target database type: mysql, redis, mongodb, postgresql, mariadb, percona.
        :type DstDatabaseType: str
        :param DstAccessType: Target access type; currently only cdb (TencentDB instance).
        :type DstAccessType: str
        :param DstInfo: Target instance information.
        :type DstInfo: :class:`tencentcloud.dts.v20180330.models.DstInfo`
        :param DatabaseInfo: Source tables to migrate; [] when the whole instance is migrated.
        :type DatabaseInfo: str
        :param CreateTime: Task creation (submission) time.
        :type CreateTime: str
        :param StartTime: Task start time.
        :type StartTime: str
        :param EndTime: Task end time.
        :type EndTime: str
        :param Status: Task status: 1 - Creating, 3 - Checking, 4 - CheckPass, 5 - CheckNotPass, 7 - Running, 8 - ReadyComplete, 9 - Success, 10 - Failed, 11 - Stopping, 12 - Completing.
        :type Status: int
        :param Detail: Task details.
        :type Detail: :class:`tencentcloud.dts.v20180330.models.MigrateDetailInfo`
        :param ErrorInfo: Task error hints; not null/empty when an error occurred.
        :type ErrorInfo: list of ErrorInfo
        """
        self.JobId = None
        self.JobName = None
        self.MigrateOption = None
        self.SrcDatabaseType = None
        self.SrcAccessType = None
        self.SrcInfo = None
        self.DstDatabaseType = None
        self.DstAccessType = None
        self.DstInfo = None
        self.DatabaseInfo = None
        self.CreateTime = None
        self.StartTime = None
        self.EndTime = None
        self.Status = None
        self.Detail = None
        self.ErrorInfo = None
class MigrateOption(AbstractModel):
    """Migration task configuration options.
    """
    def __init__(self):
        """
        :param RunMode: Run mode: 1 - run immediately, 2 - run at a scheduled time.
        :type RunMode: int
        :param ExpectTime: Expected execution time; required when RunMode is 2. Format: yyyy-mm-dd hh:mm:ss.
        :type ExpectTime: str
        :param MigrateType: Migration type: 1 - structure only, 2 - full, 3 - full + incremental.
        :type MigrateType: int
        :param MigrateObject: Migration object: 1 - whole instance, 2 - selected tables.
        :type MigrateObject: int
        :param ConsistencyType: Sampled data-consistency check: 1 - not configured, 2 - full check, 3 - sampled check, 4 - check inconsistent tables only, 5 - no check.
        :type ConsistencyType: int
        :param IsOverrideRoot: Whether the source root account overrides the target: 0 - no, 1 - yes. Must be 0 for table-level or structure-only migration.
        :type IsOverrideRoot: int
        :param ExternParams: Extra per-database parameters, as a JSON string.
Redis supports:
{
	"ClientOutputBufferHardLimit":512, 	hard limit of the replica output buffer (MB)
	"ClientOutputBufferSoftLimit":512, 	soft limit of the replica output buffer (MB)
	"ClientOutputBufferPersistTime":60, 	soft-limit grace period (seconds)
	"ReplBacklogSize":512, 	replication backlog (ring buffer) size limit (MB)
	"ReplTimeout":120, 		replication timeout (seconds)
}
MongoDB supports:
{
	'SrcAuthDatabase':'admin',
	'SrcAuthFlag': "1",
	'SrcAuthMechanism':"SCRAM-SHA-1"
}
MySQL does not support extra parameters yet.
        :type ExternParams: str
        :param ConsistencyParams: Sampling parameters; required when ConsistencyType selects sampled checking.
        :type ConsistencyParams: :class:`tencentcloud.dts.v20180330.models.ConsistencyParams`
        """
        self.RunMode = None
        self.ExpectTime = None
        self.MigrateType = None
        self.MigrateObject = None
        self.ConsistencyType = None
        self.IsOverrideRoot = None
        self.ExternParams = None
        self.ConsistencyParams = None
class MigrateStepDetailInfo(AbstractModel):
    """Information about a single migration step.
    """
    def __init__(self):
        """
        :param StepNo: Step number.
        :type StepNo: int
        :param StepName: Step display name.
        :type StepName: str
        :param StepId: Step identifier (English).
        :type StepId: str
        :param Status: Step status: 0 - default, 1 - succeeded, 2 - failed, 3 - running, 4 - not run.
        :type Status: int
        :param StartTime: Start time of the current step, "yyyy-mm-dd hh:mm:ss"; meaningless when absent or empty.
Note: this field may return null, meaning no valid value was obtained.
        :type StartTime: str
        """
        self.StepNo = None
        self.StepName = None
        self.StepId = None
        self.Status = None
        self.StartTime = None
class ModifyMigrateJobRequest(AbstractModel):
    """ModifyMigrateJob request structure.
    """
    def __init__(self):
        """
        :param JobId: ID of the data migration task to modify.
        :type JobId: str
        :param JobName: Data migration task name.
        :type JobName: str
        :param MigrateOption: Migration task configuration options.
        :type MigrateOption: :class:`tencentcloud.dts.v20180330.models.MigrateOption`
        :param SrcAccessType: Source access type: extranet (public network), cvm (self-built on CVM), dcg (Direct Connect), vpncloud (cloud VPN), cdb (TencentDB instance).
        :type SrcAccessType: str
        :param SrcInfo: Source instance information; contents depend on the migration task type.
        :type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SrcInfo`
        :param DstAccessType: Target access type: extranet, cvm, dcg, vpncloud, cdb. Currently only cdb is supported.
        :type DstAccessType: str
        :param DstInfo: Target instance information; the target region cannot be changed.
        :type DstInfo: :class:`tencentcloud.dts.v20180330.models.DstInfo`
        :param DatabaseInfo: When migrating selected tables, the source tables to migrate, as a JSON-array string as shown below.
For a two-level database-table structure:
[{"Database":"db1","Table":["table1","table2"]},{"Database":"db2"}]
For a three-level database-schema-table structure:
[{"Database":"db1","Schema":"s1","Table":["table1","table2"]},{"Database":"db1","Schema":"s2","Table":["table1","table2"]},{"Database":"db2","Schema":"s1","Table":["table1","table2"]},{"Database":"db3"},{"Database":"db4","Schema":"s1"}]
Leave unset for whole-instance migration.
        :type DatabaseInfo: str
        """
        self.JobId = None
        self.JobName = None
        self.MigrateOption = None
        self.SrcAccessType = None
        self.SrcInfo = None
        self.DstAccessType = None
        self.DstInfo = None
        self.DatabaseInfo = None
class ModifyMigrateJobResponse(AbstractModel):
    """ModifyMigrateJob response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class ModifySubscribeAutoRenewFlagRequest(AbstractModel):
    """ModifySubscribeAutoRenewFlag request structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID, e.g. subs-8uey736k.
        :type SubscribeId: str
        :param AutoRenewFlag: Auto-renew flag: 1 - auto renew, 0 - no auto renew.
        :type AutoRenewFlag: int
        """
        self.SubscribeId = None
        self.AutoRenewFlag = None
class ModifySubscribeAutoRenewFlagResponse(AbstractModel):
    """ModifySubscribeAutoRenewFlag response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class ModifySubscribeConsumeTimeRequest(AbstractModel):
    """ModifySubscribeConsumeTime request structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Data-subscription instance ID.
        :type SubscribeId: str
        :param ConsumeStartTime: Consumption start point, i.e. the starting time of subscribed data. Format: Y-m-d h:m:s; must lie within the past 24 hours.
        :type ConsumeStartTime: str
        """
        self.SubscribeId = None
        self.ConsumeStartTime = None
class ModifySubscribeConsumeTimeResponse(AbstractModel):
    """ModifySubscribeConsumeTime response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class ModifySubscribeNameRequest(AbstractModel):
    """ModifySubscribeName request structure.
    """
    def __init__(self):
        """
        :param SubscribeId: Data-subscription instance ID.
        :type SubscribeId: str
        :param SubscribeName: New name for the instance; length must be within [1, 60].
        :type SubscribeName: str
        """
        self.SubscribeId = None
        self.SubscribeName = None
class ModifySubscribeNameResponse(AbstractModel):
    """ModifySubscribeName response structure.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request; provide it when reporting a problem.
        :type RequestId: str
        """
        self.RequestId = None
class ModifySubscribeObjectsRequest(AbstractModel):
    """Request structure of the ModifySubscribeObjects API.

    Attributes:
        SubscribeId (str): ID of the data subscription instance.
        SubscribeObjectType (int): Subscription type; 0 = whole instance,
            1 = data, 2 = structure, 3 = data + structure.
        Objects (list of SubscribeObject): Subscribed database/table info.
    """

    def __init__(self):
        # All fields start unset; the caller assigns them before the call.
        self.SubscribeId = None
        self.SubscribeObjectType = None
        self.Objects = None
class ModifySubscribeObjectsResponse(AbstractModel):
    """Response structure of the ModifySubscribeObjects API.

    Attributes:
        AsyncRequestId (str): ID of the asynchronous task started by the call.
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.AsyncRequestId = None
        self.RequestId = None
class ModifySubscribeVipVportRequest(AbstractModel):
    """Request structure of the ModifySubscribeVipVport API.

    Attributes:
        SubscribeId (str): ID of the data subscription instance.
        DstUniqSubnetId (str): Target subnet; if given, DstIp must lie
            inside this subnet.
        DstIp (str): Target IP; at least one of DstIp/DstPort is required.
        DstPort (int): Target port; supported range [1025-65535].
    """

    def __init__(self):
        # All fields start unset; the caller assigns them before the call.
        self.SubscribeId = None
        self.DstUniqSubnetId = None
        self.DstIp = None
        self.DstPort = None
class ModifySubscribeVipVportResponse(AbstractModel):
    """Response structure of the ModifySubscribeVipVport API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RequestId = None
class ModifySyncJobRequest(AbstractModel):
    """Request structure of the ModifySyncJob API.

    Attributes:
        JobId (str): ID of the disaster-recovery sync task to modify.
        JobName (str): Name of the disaster-recovery sync task.
        SyncOption (:class:`tencentcloud.dts.v20180330.models.SyncOption`):
            Configuration options of the sync task.
        DatabaseInfo (str): Source tables to sync when 'specified tables'
            mode is selected, described as a JSON-array string, e.g. for a
            database-table two-level structure:
            ``[{"Database":"db1","Table":["table1","table2"]},{"Database":"db2"}]``
    """

    def __init__(self):
        # All fields start unset; the caller assigns them before the call.
        self.JobId = None
        self.JobName = None
        self.SyncOption = None
        self.DatabaseInfo = None
class ModifySyncJobResponse(AbstractModel):
    """Response structure of the ModifySyncJob API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RequestId = None
class OfflineIsolatedSubscribeRequest(AbstractModel):
    """Request structure of the OfflineIsolatedSubscribe API.

    Attributes:
        SubscribeId (str): ID of the data subscription instance.
    """

    def __init__(self):
        # Set by the caller before sending the request.
        self.SubscribeId = None
class OfflineIsolatedSubscribeResponse(AbstractModel):
    """Response structure of the OfflineIsolatedSubscribe API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RequestId = None
class ResetSubscribeRequest(AbstractModel):
    """Request structure of the ResetSubscribe API.

    Attributes:
        SubscribeId (str): ID of the data subscription instance.
    """

    def __init__(self):
        # Set by the caller before sending the request.
        self.SubscribeId = None
class ResetSubscribeResponse(AbstractModel):
    """Response structure of the ResetSubscribe API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RequestId = None
class SrcInfo(AbstractModel):
    """Source instance information for a migration task.

    All fields default to None; the caller fills in the ones relevant to
    the chosen access type.
    """

    def __init__(self):
        # AccessKey (str): Alibaba Cloud AccessKey; applies when the
        # source is Alibaba Cloud RDS 5.6.
        self.AccessKey = None
        # Ip (str): IP address of the instance.
        self.Ip = None
        # Port (int): Port of the instance.
        self.Port = None
        # User (str): Username of the instance.
        self.User = None
        # Password (str): Password of the instance.
        self.Password = None
        # RdsInstanceId (str): Alibaba Cloud RDS instance ID; applies when
        # the source is Alibaba Cloud RDS 5.6.
        self.RdsInstanceId = None
        # CvmInstanceId (str): Short CVM instance ID such as
        # ``ins-olgl39y8``, identical to the ID shown in the CVM console;
        # required for self-built instances on CVM.
        self.CvmInstanceId = None
        # UniqDcgId (str): Direct Connect gateway ID, e.g. ``dcg-0rxtqqxb``.
        self.UniqDcgId = None
        # VpcId (str): VPC ID, e.g. ``vpc-92jblxto``.
        self.VpcId = None
        # SubnetId (str): Subnet ID inside the VPC, e.g. ``subnet-3paxmkdz``.
        self.SubnetId = None
        # UniqVpnGwId (str): VPN gateway ID, e.g. ``vpngw-9ghexg7q``.
        self.UniqVpnGwId = None
        # InstanceId (str): Database instance ID, e.g. ``cdb-powiqx8q``.
        self.InstanceId = None
        # Region (str): Region name in English, e.g. ``ap-guangzhou``.
        self.Region = None
        # Supplier (str): ``aliyun`` when the instance is an Alibaba Cloud
        # RDS instance; otherwise ``others``.
        self.Supplier = None
        # CcnId (str): Cloud Connect Network ID, e.g. ``ccn-afp6kltc``.
        # NOTE: may be returned as null.
        self.CcnId = None
        # EngineVersion (str): Database version; only valid for RDS
        # instances, e.g. ``5.6`` or ``5.7``; defaults to ``5.6``.
        self.EngineVersion = None
class StartMigrateJobRequest(AbstractModel):
    """Request structure of the StartMigrateJob API.

    Attributes:
        JobId (str): ID of the data migration task.
    """

    def __init__(self):
        # Set by the caller before sending the request.
        self.JobId = None
class StartMigrateJobResponse(AbstractModel):
    """Response structure of the StartMigrateJob API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RequestId = None
class StartSyncJobRequest(AbstractModel):
    """Request structure of the StartSyncJob API.

    Attributes:
        JobId (str): ID of the disaster-recovery sync task.
    """

    def __init__(self):
        # Set by the caller before sending the request.
        self.JobId = None
class StartSyncJobResponse(AbstractModel):
    """Response structure of the StartSyncJob API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RequestId = None
class StopMigrateJobRequest(AbstractModel):
    """Request structure of the StopMigrateJob API.

    Attributes:
        JobId (str): ID of the data migration task.
    """

    def __init__(self):
        # Set by the caller before sending the request.
        self.JobId = None
class StopMigrateJobResponse(AbstractModel):
    """Response structure of the StopMigrateJob API.

    Attributes:
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RequestId = None
class SubscribeInfo(AbstractModel):
    """Information about a data subscription instance.

    All fields default to None and are populated from the API response.
    """

    def __init__(self):
        # SubscribeId (str): ID of the data subscription instance.
        self.SubscribeId = None
        # SubscribeName (str): Name of the data subscription instance.
        self.SubscribeName = None
        # ChannelId (str): Channel ID bound to the instance.
        self.ChannelId = None
        # Product (str): Product name of the bound database instance.
        self.Product = None
        # InstanceId (str): ID of the bound database instance.
        self.InstanceId = None
        # InstanceStatus (str): Status of the bound database instance.
        self.InstanceStatus = None
        # SubsStatus (str): Configuration status: unconfigure - not
        # configured, configuring - in progress, configured - done.
        self.SubsStatus = None
        # ModifyTime (str): Time of the last modification.
        self.ModifyTime = None
        # CreateTime (str): Creation time.
        self.CreateTime = None
        # IsolateTime (str): Isolation time.
        self.IsolateTime = None
        # ExpireTime (str): Expiration time.
        self.ExpireTime = None
        # OfflineTime (str): Deactivation (offline) time.
        self.OfflineTime = None
        # ConsumeStartTime (str): Most recently modified consumption start
        # point; zero value if never modified.
        self.ConsumeStartTime = None
        # Region (str): Region the subscription instance belongs to.
        self.Region = None
        # PayType (int): Billing mode; 0 - monthly subscription,
        # 1 - pay-as-you-go.
        self.PayType = None
        # Vip (str): Virtual IP of the subscription instance.
        self.Vip = None
        # Vport (int): Virtual port of the subscription instance.
        self.Vport = None
        # UniqVpcId (str): Unique ID of the VPC where the Vip resides.
        self.UniqVpcId = None
        # UniqSubnetId (str): Unique ID of the subnet where the Vip resides.
        self.UniqSubnetId = None
        # Status (str): Instance status: creating, normal, isolating,
        # isolated, offlining, offline.
        self.Status = None
        # SdkConsumedTime (str): Timestamp of the last message confirmed by
        # the SDK; if the SDK keeps consuming, this also serves as its
        # current consumption time point.
        self.SdkConsumedTime = None
        # Tags (list of TagItem): Resource tags.
        # NOTE: may be returned as null.
        self.Tags = None
        # AutoRenewFlag (int): Auto-renewal flag; 0 - no, 1 - yes.
        # NOTE: may be returned as null.
        self.AutoRenewFlag = None
        # SubscribeVersion (str): Subscription instance version;
        # txdts - legacy data subscription, kafka - Kafka edition.
        # NOTE: may be returned as null.
        self.SubscribeVersion = None
class SubscribeObject(AbstractModel):
    """An object (database or table) of a data subscription.

    Attributes:
        ObjectsType (int): Type of the subscribed object; 0 - database,
            1 - table inside a database. NOTE: may be returned as null.
        DatabaseName (str): Name of the subscribed database.
            NOTE: may be returned as null.
        TableNames (list of str): Names of subscribed tables within the
            database. NOTE: may be returned as null.
    """

    def __init__(self):
        # All fields start unset until filled by the caller/_deserialize.
        self.ObjectsType = None
        self.DatabaseName = None
        self.TableNames = None
class SubscribeRegionConf(AbstractModel):
    """Sale information of data subscription per region.

    Attributes:
        RegionName (str): Region name, e.g. Guangzhou.
            NOTE: may be returned as null.
        Region (str): Region identifier, e.g. ``ap-guangzhou``.
            NOTE: may be returned as null.
        Area (str): Area name, e.g. South China.
            NOTE: may be returned as null.
        IsDefaultRegion (int): Whether this is the default region;
            0 - no, 1 - yes. NOTE: may be returned as null.
        Status (int): Sale status in the region; 1 - normal, 2 - grayscale
            release, 3 - off sale. NOTE: may be returned as null.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.RegionName = None
        self.Region = None
        self.Area = None
        self.IsDefaultRegion = None
        self.Status = None
class SwitchDrToMasterRequest(AbstractModel):
    """Request structure of the SwitchDrToMaster API.

    Attributes:
        DstInfo (:class:`tencentcloud.dts.v20180330.models.SyncInstanceInfo`):
            Information about the disaster-recovery instance.
        DatabaseType (str): Database type, e.g. ``mysql``.
    """

    def __init__(self):
        # All fields start unset; the caller assigns them before the call.
        self.DstInfo = None
        self.DatabaseType = None
class SwitchDrToMasterResponse(AbstractModel):
    """Response structure of the SwitchDrToMaster API.

    Attributes:
        AsyncRequestId (str): ID of the background asynchronous task.
        RequestId (str): Unique request ID returned with every response;
            provide it when reporting an issue.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.AsyncRequestId = None
        self.RequestId = None
class SyncCheckStepInfo(AbstractModel):
    """A verification step of a disaster-recovery sync task.

    Attributes:
        StepNo (int): Step sequence number.
        StepName (str): Display name of the step.
        StepCode (int): Result code of the step execution.
        StepMessage (str): Result message of the step execution.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.StepNo = None
        self.StepName = None
        self.StepCode = None
        self.StepMessage = None
class SyncDetailInfo(AbstractModel):
    """Detailed progress of a sync task.

    Attributes:
        StepAll (int): Total number of steps.
        StepNow (int): Current step.
        Progress (str): Overall progress.
        CurrentStepProgress (str): Progress of the current step.
        MasterSlaveDistance (int): Master-slave lag in MB.
        SecondsBehindMaster (int): Master-slave lag in seconds.
        StepInfo (list of SyncStepDetailInfo): Per-step information.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.StepAll = None
        self.StepNow = None
        self.Progress = None
        self.CurrentStepProgress = None
        self.MasterSlaveDistance = None
        self.SecondsBehindMaster = None
        self.StepInfo = None
class SyncInstanceInfo(AbstractModel):
    """Instance info of a disaster-recovery sync; describes either the
    master or the disaster-recovery instance.

    Attributes:
        Region (str): Region name in English, e.g. ``ap-guangzhou``.
        InstanceId (str): Short instance ID.
    """

    def __init__(self):
        # Both fields start unset until assigned.
        self.Region = None
        self.InstanceId = None
class SyncJobInfo(AbstractModel):
    """Information about a disaster-recovery sync task.

    All fields default to None and are populated from the API response.
    """

    def __init__(self):
        # JobId (str): ID of the disaster-recovery task.
        self.JobId = None
        # JobName (str): Name of the disaster-recovery task.
        self.JobName = None
        # SyncOption (SyncOption): Sync options of the task.
        self.SyncOption = None
        # SrcAccessType (str): Access type of the source.
        self.SrcAccessType = None
        # SrcDatabaseType (str): Database type of the source.
        self.SrcDatabaseType = None
        # SrcInfo (SyncInstanceInfo): Source instance information.
        self.SrcInfo = None
        # DstAccessType (str): Access type of the disaster-recovery side.
        self.DstAccessType = None
        # DstDatabaseType (str): Database type of the disaster-recovery side.
        self.DstDatabaseType = None
        # DstInfo (SyncInstanceInfo): Disaster-recovery instance info.
        self.DstInfo = None
        # Detail (SyncDetailInfo): Task detail information.
        self.Detail = None
        # Status (int): Task status.
        self.Status = None
        # DatabaseInfo (str): Databases/tables being migrated.
        self.DatabaseInfo = None
        # CreateTime (str): Creation time.
        self.CreateTime = None
        # StartTime (str): Start time.
        self.StartTime = None
        # EndTime (str): End time.
        self.EndTime = None
class SyncOption(AbstractModel):
    """Configuration options of a disaster-recovery sync task.

    Attributes:
        SyncObject (int): Scope of the sync; 1 - whole instance,
            2 - specified databases/tables.
        RunMode (int): Start setting; 1 - start immediately.
        SyncType (int): Sync mode; 3 - full plus incremental sync.
        ConsistencyType (int): Data consistency check; 1 - no
            configuration required.
    """

    def __init__(self):
        # All fields start unset; the caller assigns them before the call.
        self.SyncObject = None
        self.RunMode = None
        self.SyncType = None
        self.ConsistencyType = None
class SyncStepDetailInfo(AbstractModel):
    """Progress of a single step of a sync task.

    Attributes:
        StepNo (int): Step sequence number.
        StepName (str): Step name.
        CanStop (int): Whether the step can be aborted.
        StepId (int): Step identifier.
    """

    def __init__(self):
        # Populated from the API response by _deserialize.
        self.StepNo = None
        self.StepName = None
        self.CanStop = None
        self.StepId = None
class TagFilter(AbstractModel):
    """Tag-based filter condition.

    Attributes:
        TagKey (str): Tag key.
        TagValue (list of str): Tag values.
    """

    def __init__(self):
        # Both fields start unset until assigned by the caller.
        self.TagKey = None
        self.TagValue = None
class TagItem(AbstractModel):
    """A resource tag.

    Attributes:
        TagKey (str): Tag key.
        TagValue (str): Tag value. NOTE: may be returned as null.
    """

    def __init__(self):
        # Both fields start unset until assigned.
        self.TagKey = None
        self.TagValue = None
| 31.526057 | 236 | 0.600867 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class ActivateSubscribeRequest(AbstractModel):
"""ActivateSubscribe请求参数结构体
"""
def __init__(self):
"""
:param SubscribeId: 订阅实例ID。
:type SubscribeId: str
:param InstanceId: 数据库实例ID
:type InstanceId: str
:param SubscribeObjectType: 数据订阅类型0-全实例订阅,1数据订阅,2结构订阅,3数据订阅与结构订阅
:type SubscribeObjectType: int
:param Objects: 订阅对象
:type Objects: :class:`tencentcloud.dts.v20180330.models.SubscribeObject`
:param UniqSubnetId: 数据订阅服务所在子网。默认为数据库实例所在的子网内。
:type UniqSubnetId: str
:param Vport: 订阅服务端口;默认为7507
:type Vport: int
"""
self.SubscribeId = None
self.InstanceId = None
self.SubscribeObjectType = None
self.Objects = None
self.UniqSubnetId = None
self.Vport = None
def _deserialize(self, params):
self.SubscribeId = params.get("SubscribeId")
self.InstanceId = params.get("InstanceId")
self.SubscribeObjectType = params.get("SubscribeObjectType")
if params.get("Objects") is not None:
self.Objects = SubscribeObject()
self.Objects._deserialize(params.get("Objects"))
self.UniqSubnetId = params.get("UniqSubnetId")
self.Vport = params.get("Vport")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class ActivateSubscribeResponse(AbstractModel):
"""ActivateSubscribe返回参数结构体
"""
def __init__(self):
"""
:param AsyncRequestId: 配置数据订阅任务ID。
:type AsyncRequestId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CompleteMigrateJobRequest(AbstractModel):
"""CompleteMigrateJob请求参数结构体
"""
def __init__(self):
"""
:param JobId: 数据迁移任务ID
:type JobId: str
"""
self.JobId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CompleteMigrateJobResponse(AbstractModel):
"""CompleteMigrateJob返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class ConsistencyParams(AbstractModel):
"""抽样检验时的抽样参数
"""
def __init__(self):
"""
:param SelectRowsPerTable: 数据内容检测参数。表中选出用来数据对比的行,占表的总行数的百分比。取值范围是整数[1-100]
:type SelectRowsPerTable: int
:param TablesSelectAll: 数据内容检测参数。迁移库表中,要进行数据内容检测的表,占所有表的百分比。取值范围是整数[1-100]
:type TablesSelectAll: int
:param TablesSelectCount: 数据数量检测,检测表行数是否一致。迁移库表中,要进行数据数量检测的表,占所有表的百分比。取值范围是整数[1-100]
:type TablesSelectCount: int
"""
self.SelectRowsPerTable = None
self.TablesSelectAll = None
self.TablesSelectCount = None
def _deserialize(self, params):
self.SelectRowsPerTable = params.get("SelectRowsPerTable")
self.TablesSelectAll = params.get("TablesSelectAll")
self.TablesSelectCount = params.get("TablesSelectCount")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMigrateCheckJobRequest(AbstractModel):
"""CreateMigrateCheckJob请求参数结构体
"""
def __init__(self):
"""
:param JobId: 数据迁移任务ID
:type JobId: str
"""
self.JobId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMigrateCheckJobResponse(AbstractModel):
"""CreateMigrateCheckJob返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMigrateJobRequest(AbstractModel):
"""CreateMigrateJob请求参数结构体
"""
def __init__(self):
"""
:param JobName: 数据迁移任务名称
:type JobName: str
:param MigrateOption: 迁移任务配置选项
:type MigrateOption: :class:`tencentcloud.dts.v20180330.models.MigrateOption`
:param SrcDatabaseType: 源实例数据库类型,目前支持:mysql,redis,mongodb,postgresql,mariadb,percona。不同地域数据库类型的具体支持情况,请参考控制台创建迁移页面。
:type SrcDatabaseType: str
:param SrcAccessType: 源实例接入类型,值包括:extranet(外网),cvm(CVM自建实例),dcg(专线接入的实例),vpncloud(云VPN接入的实例),cdb(腾讯云数据库实例),ccn(云联网实例)
:type SrcAccessType: str
:param SrcInfo: 源实例信息,具体内容跟迁移任务类型相关
:type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SrcInfo`
:param DstDatabaseType: 目标实例数据库类型,目前支持:mysql,redis,mongodb,postgresql,mariadb,percona。不同地域数据库类型的具体支持情况,请参考控制台创建迁移页面。
:type DstDatabaseType: str
:param DstAccessType: 目标实例接入类型,目前支持:cdb(腾讯云数据库实例)
:type DstAccessType: str
:param DstInfo: 目标实例信息
:type DstInfo: :class:`tencentcloud.dts.v20180330.models.DstInfo`
:param DatabaseInfo: 需要迁移的源数据库表信息,用json格式的字符串描述。当MigrateOption.MigrateObject配置为2(指定库表迁移)时必填。
对于database-table两级结构的数据库:
[{Database:db1,Table:[table1,table2]},{Database:db2}]
对于database-schema-table三级结构:
[{Database:db1,Schema:s1
Table:[table1,table2]},{Database:db1,Schema:s2
Table:[table1,table2]},{Database:db2,Schema:s1
Table:[table1,table2]},{Database:db3},{Database:db4
Schema:s1}]
:type DatabaseInfo: str
"""
self.JobName = None
self.MigrateOption = None
self.SrcDatabaseType = None
self.SrcAccessType = None
self.SrcInfo = None
self.DstDatabaseType = None
self.DstAccessType = None
self.DstInfo = None
self.DatabaseInfo = None
def _deserialize(self, params):
self.JobName = params.get("JobName")
if params.get("MigrateOption") is not None:
self.MigrateOption = MigrateOption()
self.MigrateOption._deserialize(params.get("MigrateOption"))
self.SrcDatabaseType = params.get("SrcDatabaseType")
self.SrcAccessType = params.get("SrcAccessType")
if params.get("SrcInfo") is not None:
self.SrcInfo = SrcInfo()
self.SrcInfo._deserialize(params.get("SrcInfo"))
self.DstDatabaseType = params.get("DstDatabaseType")
self.DstAccessType = params.get("DstAccessType")
if params.get("DstInfo") is not None:
self.DstInfo = DstInfo()
self.DstInfo._deserialize(params.get("DstInfo"))
self.DatabaseInfo = params.get("DatabaseInfo")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateMigrateJobResponse(AbstractModel):
"""CreateMigrateJob返回参数结构体
"""
def __init__(self):
"""
:param JobId: 数据迁移任务ID
:type JobId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.JobId = None
self.RequestId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSubscribeRequest(AbstractModel):
"""CreateSubscribe请求参数结构体
"""
def __init__(self):
"""
:param Product: 订阅的数据库类型,目前支持的有 mysql
:type Product: str
:param PayType: 实例付费类型,1小时计费,0包年包月
:type PayType: int
:param Duration: 购买时长。PayType为0时必填。单位为月,最大支持120
:type Duration: int
:param Count: 购买数量,默认为1,最大为10
:type Count: int
:param AutoRenew: 是否自动续费,默认为0,1表示自动续费。小时计费实例设置该标识无效。
:type AutoRenew: int
:param Tags: 实例资源标签
:type Tags: list of TagItem
"""
self.Product = None
self.PayType = None
self.Duration = None
self.Count = None
self.AutoRenew = None
self.Tags = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.PayType = params.get("PayType")
self.Duration = params.get("Duration")
self.Count = params.get("Count")
self.AutoRenew = params.get("AutoRenew")
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = TagItem()
obj._deserialize(item)
self.Tags.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSubscribeResponse(AbstractModel):
"""CreateSubscribe返回参数结构体
"""
def __init__(self):
"""
:param SubscribeIds: 数据订阅实例的ID数组
注意:此字段可能返回 null,表示取不到有效值。
:type SubscribeIds: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.SubscribeIds = None
self.RequestId = None
def _deserialize(self, params):
self.SubscribeIds = params.get("SubscribeIds")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSyncCheckJobRequest(AbstractModel):
"""CreateSyncCheckJob请求参数结构体
"""
def __init__(self):
"""
:param JobId: 灾备同步任务ID
:type JobId: str
"""
self.JobId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSyncCheckJobResponse(AbstractModel):
"""CreateSyncCheckJob返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSyncJobRequest(AbstractModel):
"""CreateSyncJob请求参数结构体
"""
def __init__(self):
"""
:param JobName: 灾备同步任务名
:type JobName: str
:param SyncOption: 灾备同步任务配置选项
:type SyncOption: :class:`tencentcloud.dts.v20180330.models.SyncOption`
:param SrcDatabaseType: 源实例数据库类型,目前仅包括:mysql
:type SrcDatabaseType: str
:param SrcAccessType: 源实例接入类型,目前仅包括:cdb(云上cdb实例)
:type SrcAccessType: str
:param SrcInfo: 源实例信息
:type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SyncInstanceInfo`
:param DstDatabaseType: 目标实例数据库类型,目前仅包括:mysql
:type DstDatabaseType: str
:param DstAccessType: 目标实例接入类型,目前仅包括:cdb(云上cdb实例)
:type DstAccessType: str
:param DstInfo: 目标实例信息
:type DstInfo: :class:`tencentcloud.dts.v20180330.models.SyncInstanceInfo`
:param DatabaseInfo: 需要同步的源数据库表信息,用json格式的字符串描述。
对于database-table两级结构的数据库:
[{Database:db1,Table:[table1,table2]},{Database:db2}]
:type DatabaseInfo: str
"""
self.JobName = None
self.SyncOption = None
self.SrcDatabaseType = None
self.SrcAccessType = None
self.SrcInfo = None
self.DstDatabaseType = None
self.DstAccessType = None
self.DstInfo = None
self.DatabaseInfo = None
def _deserialize(self, params):
self.JobName = params.get("JobName")
if params.get("SyncOption") is not None:
self.SyncOption = SyncOption()
self.SyncOption._deserialize(params.get("SyncOption"))
self.SrcDatabaseType = params.get("SrcDatabaseType")
self.SrcAccessType = params.get("SrcAccessType")
if params.get("SrcInfo") is not None:
self.SrcInfo = SyncInstanceInfo()
self.SrcInfo._deserialize(params.get("SrcInfo"))
self.DstDatabaseType = params.get("DstDatabaseType")
self.DstAccessType = params.get("DstAccessType")
if params.get("DstInfo") is not None:
self.DstInfo = SyncInstanceInfo()
self.DstInfo._deserialize(params.get("DstInfo"))
self.DatabaseInfo = params.get("DatabaseInfo")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateSyncJobResponse(AbstractModel):
"""CreateSyncJob返回参数结构体
"""
def __init__(self):
"""
:param JobId: 灾备同步任务ID
:type JobId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.JobId = None
self.RequestId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteMigrateJobRequest(AbstractModel):
"""DeleteMigrateJob请求参数结构体
"""
def __init__(self):
"""
:param JobId: 数据迁移任务ID
:type JobId: str
"""
self.JobId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteMigrateJobResponse(AbstractModel):
"""DeleteMigrateJob返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteSyncJobRequest(AbstractModel):
"""DeleteSyncJob请求参数结构体
"""
def __init__(self):
"""
:param JobId: 待删除的灾备同步任务ID
:type JobId: str
"""
self.JobId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DeleteSyncJobResponse(AbstractModel):
"""DeleteSyncJob返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeAsyncRequestInfoRequest(AbstractModel):
"""DescribeAsyncRequestInfo请求参数结构体
"""
def __init__(self):
"""
:param AsyncRequestId: 任务 ID
:type AsyncRequestId: str
"""
self.AsyncRequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeAsyncRequestInfoResponse(AbstractModel):
"""DescribeAsyncRequestInfo返回参数结构体
"""
def __init__(self):
"""
:param Info: 任务执行结果信息
:type Info: str
:param Status: 任务执行状态,可能的值有:success,failed,running
:type Status: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Info = None
self.Status = None
self.RequestId = None
def _deserialize(self, params):
self.Info = params.get("Info")
self.Status = params.get("Status")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeMigrateCheckJobRequest(AbstractModel):
"""DescribeMigrateCheckJob请求参数结构体
"""
def __init__(self):
"""
:param JobId: 数据迁移任务ID
:type JobId: str
"""
self.JobId = None
def _deserialize(self, params):
self.JobId = params.get("JobId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeMigrateCheckJobResponse(AbstractModel):
"""DescribeMigrateCheckJob返回参数结构体
"""
def __init__(self):
"""
:param Status: 校验任务状态:unavailable(当前不可用), starting(开始中),running(校验中),finished(校验完成)
:type Status: str
:param ErrorCode: 任务的错误码
:type ErrorCode: int
:param ErrorMessage: 任务的错误信息
:type ErrorMessage: str
:param Progress: Check任务总进度,如:"30"表示30%
:type Progress: str
:param CheckFlag: 校验是否通过,0-未通过,1-校验通过, 3-未校验
:type CheckFlag: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Status = None
self.ErrorCode = None
self.ErrorMessage = None
self.Progress = None
self.CheckFlag = None
self.RequestId = None
def _deserialize(self, params):
self.Status = params.get("Status")
self.ErrorCode = params.get("ErrorCode")
self.ErrorMessage = params.get("ErrorMessage")
self.Progress = params.get("Progress")
self.CheckFlag = params.get("CheckFlag")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeMigrateJobsRequest(AbstractModel):
    """DescribeMigrateJobs request structure.

    """

    def __init__(self):
        """
        :param JobId: Data migration task ID
        :type JobId: str
        :param JobName: Data migration task name
        :type JobName: str
        :param Order: Sort field; one of JobId, Status, JobName, MigrateType, RunMode, CreateTime
        :type Order: str
        :param OrderSeq: Sort order: ASC (ascending) or DESC (descending)
        :type OrderSeq: str
        :param Offset: Offset; defaults to 0
        :type Offset: int
        :param Limit: Number of instances to return; defaults to 20, valid range [1,100]
        :type Limit: int
        """
        self.JobId = None
        self.JobName = None
        self.Order = None
        self.OrderSeq = None
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.JobId = params.get("JobId")
        self.JobName = params.get("JobName")
        self.Order = params.get("Order")
        self.OrderSeq = params.get("OrderSeq")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeMigrateJobsResponse(AbstractModel):
    """DescribeMigrateJobs response structure.

    """

    def __init__(self):
        """
        :param TotalCount: Number of tasks
        :type TotalCount: int
        :param JobList: Array of task details
        :type JobList: list of MigrateJobInfo
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.JobList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        if params.get("JobList") is not None:
            # Each list element is itself a model that knows how to deserialize.
            self.JobList = []
            for item in params.get("JobList"):
                obj = MigrateJobInfo()
                obj._deserialize(item)
                self.JobList.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeRegionConfRequest(AbstractModel):
    """DescribeRegionConf request structure. This request takes no parameters.

    """
class DescribeRegionConfResponse(AbstractModel):
    """DescribeRegionConf response structure.

    """

    def __init__(self):
        """
        :param TotalCount: Number of purchasable regions
        :type TotalCount: int
        :param Items: Details of purchasable regions
        :type Items: list of SubscribeRegionConf
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.Items = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        if params.get("Items") is not None:
            # Deserialize each region config entry into its model object.
            self.Items = []
            for item in params.get("Items"):
                obj = SubscribeRegionConf()
                obj._deserialize(item)
                self.Items.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSubscribeConfRequest(AbstractModel):
    """DescribeSubscribeConf request structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID
        :type SubscribeId: str
        """
        self.SubscribeId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSubscribeConfResponse(AbstractModel):
    """DescribeSubscribeConf response structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID
        :type SubscribeId: str
        :param SubscribeName: Subscription instance name
        :type SubscribeName: str
        :param ChannelId: Subscription channel
        :type ChannelId: str
        :param Product: Subscribed database type
        :type Product: str
        :param InstanceId: Subscribed instance
        :type InstanceId: str
        :param InstanceStatus: Status of the subscribed instance; possible values: running, offline, isolate
        :type InstanceStatus: str
        :param SubsStatus: Subscription instance configuration status: unconfigure - not configured, configuring - being configured, configured - configured
        :type SubsStatus: str
        :param Status: Subscription instance lifecycle status; possible values: normal, isolating, isolated, offlining
        :type Status: str
        :param CreateTime: Creation time of the subscription instance
        :type CreateTime: str
        :param IsolateTime: Time when the subscription instance was isolated
        :type IsolateTime: str
        :param ExpireTime: Expiration time of the subscription instance
        :type ExpireTime: str
        :param OfflineTime: Time when the subscription instance was taken offline
        :type OfflineTime: str
        :param ConsumeStartTime: Start time point of data consumption for the subscription instance
        :type ConsumeStartTime: str
        :param PayType: Billing type: 1 - pay-per-hour, 0 - monthly/yearly subscription
        :type PayType: int
        :param Vip: Subscription channel VIP
        :type Vip: str
        :param Vport: Subscription channel port
        :type Vport: int
        :param UniqVpcId: VPC ID of the subscription channel
        :type UniqVpcId: str
        :param UniqSubnetId: Subnet ID of the subscription channel
        :type UniqSubnetId: str
        :param SdkConsumedTime: Current SDK consumption checkpoint
        :type SdkConsumedTime: str
        :param SdkHost: Subscription SDK IP address
        :type SdkHost: str
        :param SubscribeObjectType: Subscription object type: 0 - whole instance, 1 - DDL data subscription, 2 - DML structure subscription, 3 - DDL data + DML structure subscription
        :type SubscribeObjectType: int
        :param SubscribeObjects: Subscription objects; an empty array when SubscribeObjectType is 0
        :type SubscribeObjects: list of SubscribeObject
        :param ModifyTime: Last modification time
        :type ModifyTime: str
        :param Region: Region
        :type Region: str
        :param Tags: Tags of the subscription instance
Note: this field may return null, indicating that no valid value was found.
        :type Tags: list of TagItem
        :param AutoRenewFlag: Auto-renewal flag: 0 - no auto-renewal, 1 - auto-renewal
Note: this field may return null, indicating that no valid value was found.
        :type AutoRenewFlag: int
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.SubscribeId = None
        self.SubscribeName = None
        self.ChannelId = None
        self.Product = None
        self.InstanceId = None
        self.InstanceStatus = None
        self.SubsStatus = None
        self.Status = None
        self.CreateTime = None
        self.IsolateTime = None
        self.ExpireTime = None
        self.OfflineTime = None
        self.ConsumeStartTime = None
        self.PayType = None
        self.Vip = None
        self.Vport = None
        self.UniqVpcId = None
        self.UniqSubnetId = None
        self.SdkConsumedTime = None
        self.SdkHost = None
        self.SubscribeObjectType = None
        self.SubscribeObjects = None
        self.ModifyTime = None
        self.Region = None
        self.Tags = None
        self.AutoRenewFlag = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        self.SubscribeName = params.get("SubscribeName")
        self.ChannelId = params.get("ChannelId")
        self.Product = params.get("Product")
        self.InstanceId = params.get("InstanceId")
        self.InstanceStatus = params.get("InstanceStatus")
        self.SubsStatus = params.get("SubsStatus")
        self.Status = params.get("Status")
        self.CreateTime = params.get("CreateTime")
        self.IsolateTime = params.get("IsolateTime")
        self.ExpireTime = params.get("ExpireTime")
        self.OfflineTime = params.get("OfflineTime")
        self.ConsumeStartTime = params.get("ConsumeStartTime")
        self.PayType = params.get("PayType")
        self.Vip = params.get("Vip")
        self.Vport = params.get("Vport")
        self.UniqVpcId = params.get("UniqVpcId")
        self.UniqSubnetId = params.get("UniqSubnetId")
        self.SdkConsumedTime = params.get("SdkConsumedTime")
        self.SdkHost = params.get("SdkHost")
        self.SubscribeObjectType = params.get("SubscribeObjectType")
        if params.get("SubscribeObjects") is not None:
            # Deserialize each subscription object into its model.
            self.SubscribeObjects = []
            for item in params.get("SubscribeObjects"):
                obj = SubscribeObject()
                obj._deserialize(item)
                self.SubscribeObjects.append(obj)
        self.ModifyTime = params.get("ModifyTime")
        self.Region = params.get("Region")
        if params.get("Tags") is not None:
            # Deserialize each tag into its model.
            self.Tags = []
            for item in params.get("Tags"):
                obj = TagItem()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.AutoRenewFlag = params.get("AutoRenewFlag")
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSubscribesRequest(AbstractModel):
    """DescribeSubscribes request structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Data subscription instance ID
        :type SubscribeId: str
        :param SubscribeName: Data subscription instance name
        :type SubscribeName: str
        :param InstanceId: ID of the bound database instance
        :type InstanceId: str
        :param ChannelId: Channel ID of the data subscription instance
        :type ChannelId: str
        :param PayType: Billing mode filter; possible values: 0 - monthly/yearly subscription, 1 - pay-as-you-go
        :type PayType: str
        :param Product: Subscribed database product, e.g. mysql
        :type Product: str
        :param Status: Data subscription instance status: creating, normal, isolating, isolated, offlining
        :type Status: list of str
        :param SubsStatus: Configuration status of the data subscription instance: unconfigure - not configured, configuring - being configured, configured - configured
        :type SubsStatus: list of str
        :param Offset: Starting offset of returned records
        :type Offset: int
        :param Limit: Number of records returned per query
        :type Limit: int
        :param OrderDirection: Sort direction, "DESC" or "ASC"; defaults to "DESC", sorting by creation time in reverse order
        :type OrderDirection: str
        :param TagFilters: Tag filter conditions
        :type TagFilters: list of TagFilter
        :param SubscribeVersion: Subscription instance version: txdts - legacy data subscription, kafka - Kafka-edition data subscription
        :type SubscribeVersion: str
        """
        self.SubscribeId = None
        self.SubscribeName = None
        self.InstanceId = None
        self.ChannelId = None
        self.PayType = None
        self.Product = None
        self.Status = None
        self.SubsStatus = None
        self.Offset = None
        self.Limit = None
        self.OrderDirection = None
        self.TagFilters = None
        self.SubscribeVersion = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        self.SubscribeName = params.get("SubscribeName")
        self.InstanceId = params.get("InstanceId")
        self.ChannelId = params.get("ChannelId")
        self.PayType = params.get("PayType")
        self.Product = params.get("Product")
        self.Status = params.get("Status")
        self.SubsStatus = params.get("SubsStatus")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.OrderDirection = params.get("OrderDirection")
        if params.get("TagFilters") is not None:
            # Deserialize each tag filter into its model.
            self.TagFilters = []
            for item in params.get("TagFilters"):
                obj = TagFilter()
                obj._deserialize(item)
                self.TagFilters.append(obj)
        self.SubscribeVersion = params.get("SubscribeVersion")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSubscribesResponse(AbstractModel):
    """DescribeSubscribes response structure.

    """

    def __init__(self):
        """
        :param TotalCount: Total number of instances matching the query conditions
        :type TotalCount: int
        :param Items: Information list of data subscription instances
        :type Items: list of SubscribeInfo
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.Items = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        if params.get("Items") is not None:
            # Deserialize each subscription entry into its model.
            self.Items = []
            for item in params.get("Items"):
                obj = SubscribeInfo()
                obj._deserialize(item)
                self.Items.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSyncCheckJobRequest(AbstractModel):
    """DescribeSyncCheckJob request structure.

    """

    def __init__(self):
        """
        :param JobId: ID of the disaster-recovery sync task to query
        :type JobId: str
        """
        self.JobId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.JobId = params.get("JobId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSyncCheckJobResponse(AbstractModel):
    """DescribeSyncCheckJob response structure.

    """

    def __init__(self):
        """
        :param Status: Task check status: starting, running, finished
        :type Status: str
        :param ErrorCode: Task check result code
        :type ErrorCode: int
        :param ErrorMessage: Prompt message
        :type ErrorMessage: str
        :param StepInfo: Description of the task execution steps
        :type StepInfo: list of SyncCheckStepInfo
        :param CheckFlag: Check flag: 0 (not yet passed), 1 (passed)
        :type CheckFlag: int
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.Status = None
        self.ErrorCode = None
        self.ErrorMessage = None
        self.StepInfo = None
        self.CheckFlag = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Status = params.get("Status")
        self.ErrorCode = params.get("ErrorCode")
        self.ErrorMessage = params.get("ErrorMessage")
        if params.get("StepInfo") is not None:
            # Deserialize each step entry into its model.
            self.StepInfo = []
            for item in params.get("StepInfo"):
                obj = SyncCheckStepInfo()
                obj._deserialize(item)
                self.StepInfo.append(obj)
        self.CheckFlag = params.get("CheckFlag")
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSyncJobsRequest(AbstractModel):
    """DescribeSyncJobs request structure.

    """

    def __init__(self):
        """
        :param JobId: Disaster-recovery sync task ID
        :type JobId: str
        :param JobName: Disaster-recovery sync task name
        :type JobName: str
        :param Order: Sort field; one of JobId, Status, JobName, CreateTime
        :type Order: str
        :param OrderSeq: Sort order: ASC (ascending) or DESC (descending)
        :type OrderSeq: str
        :param Offset: Offset; defaults to 0
        :type Offset: int
        :param Limit: Number of instances to return; defaults to 20, valid range [1,100]
        :type Limit: int
        """
        self.JobId = None
        self.JobName = None
        self.Order = None
        self.OrderSeq = None
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.JobId = params.get("JobId")
        self.JobName = params.get("JobName")
        self.Order = params.get("Order")
        self.OrderSeq = params.get("OrderSeq")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DescribeSyncJobsResponse(AbstractModel):
    """DescribeSyncJobs response structure.

    """

    def __init__(self):
        """
        :param TotalCount: Number of tasks
        :type TotalCount: int
        :param JobList: Array of task details
        :type JobList: list of SyncJobInfo
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.JobList = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TotalCount = params.get("TotalCount")
        if params.get("JobList") is not None:
            # Deserialize each job entry into its model.
            self.JobList = []
            for item in params.get("JobList"):
                obj = SyncJobInfo()
                obj._deserialize(item)
                self.JobList.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class DstInfo(AbstractModel):
    """Target instance information; the exact content depends on the migration task type.

    """

    def __init__(self):
        """
        :param InstanceId: Target instance ID, e.g. cdb-jd92ijd8
        :type InstanceId: str
        :param Region: Target instance region, e.g. ap-guangzhou
        :type Region: str
        :param Ip: Target instance VIP. Deprecated; no need to fill in
        :type Ip: str
        :param Port: Target instance vport. Deprecated; no need to fill in
        :type Port: int
        :param ReadOnly: Currently only effective for MySQL. For whole-instance migration: 1 - read-only, 0 - read-write
        :type ReadOnly: int
        :param User: Target database account
        :type User: str
        :param Password: Target database password
        :type Password: str
        """
        self.InstanceId = None
        self.Region = None
        self.Ip = None
        self.Port = None
        self.ReadOnly = None
        self.User = None
        self.Password = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.Region = params.get("Region")
        self.Ip = params.get("Ip")
        self.Port = params.get("Port")
        self.ReadOnly = params.get("ReadOnly")
        self.User = params.get("User")
        self.Password = params.get("Password")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ErrorInfo(AbstractModel):
    """Migration task error information and hints.

    """

    def __init__(self):
        """
        :param ErrorLog: Detailed error log, including the error code and error message
        :type ErrorLog: str
        :param HelpDoc: URL of the help document corresponding to the error
        :type HelpDoc: str
        """
        self.ErrorLog = None
        self.HelpDoc = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.ErrorLog = params.get("ErrorLog")
        self.HelpDoc = params.get("HelpDoc")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class IsolateSubscribeRequest(AbstractModel):
    """IsolateSubscribe request structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID
        :type SubscribeId: str
        """
        self.SubscribeId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class IsolateSubscribeResponse(AbstractModel):
    """IsolateSubscribe response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class MigrateDetailInfo(AbstractModel):
    """Detailed description of the migration process.

    """

    def __init__(self):
        """
        :param StepAll: Total number of steps
        :type StepAll: int
        :param StepNow: Current step
        :type StepNow: int
        :param Progress: Overall progress, e.g. "10"
        :type Progress: str
        :param CurrentStepProgress: Progress of the current step, e.g. "1"
        :type CurrentStepProgress: str
        :param MasterSlaveDistance: Master-slave lag in MB; effective during the incremental sync phase; currently supported products: redis and mysql
        :type MasterSlaveDistance: int
        :param SecondsBehindMaster: Master-slave lag in seconds; effective during the incremental sync phase; currently supported product: mysql
        :type SecondsBehindMaster: int
        :param StepInfo: Step information
        :type StepInfo: list of MigrateStepDetailInfo
        """
        self.StepAll = None
        self.StepNow = None
        self.Progress = None
        self.CurrentStepProgress = None
        self.MasterSlaveDistance = None
        self.SecondsBehindMaster = None
        self.StepInfo = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.StepAll = params.get("StepAll")
        self.StepNow = params.get("StepNow")
        self.Progress = params.get("Progress")
        self.CurrentStepProgress = params.get("CurrentStepProgress")
        self.MasterSlaveDistance = params.get("MasterSlaveDistance")
        self.SecondsBehindMaster = params.get("SecondsBehindMaster")
        if params.get("StepInfo") is not None:
            # Deserialize each step entry into its model.
            self.StepInfo = []
            for item in params.get("StepInfo"):
                obj = MigrateStepDetailInfo()
                obj._deserialize(item)
                self.StepInfo.append(obj)
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class MigrateJobInfo(AbstractModel):
    """Migration task details.

    """

    def __init__(self):
        """
        :param JobId: Data migration task ID
        :type JobId: str
        :param JobName: Data migration task name
        :type JobName: str
        :param MigrateOption: Migration task configuration options
        :type MigrateOption: :class:`tencentcloud.dts.v20180330.models.MigrateOption`
        :param SrcDatabaseType: Source instance database type: mysql, redis, mongodb, postgresql, mariadb, percona
        :type SrcDatabaseType: str
        :param SrcAccessType: Source instance access type; values include: extranet (public network), cvm (CVM self-built instance), dcg (Direct Connect), vpncloud (cloud VPN), cdb (TencentDB instance), ccn (Cloud Connect Network)
        :type SrcAccessType: str
        :param SrcInfo: Source instance information; the exact content depends on the migration task type
        :type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SrcInfo`
        :param DstDatabaseType: Target instance database type: mysql, redis, mongodb, postgresql, mariadb, percona
        :type DstDatabaseType: str
        :param DstAccessType: Target instance access type; currently only cdb (TencentDB instance) is supported
        :type DstAccessType: str
        :param DstInfo: Target instance information
        :type DstInfo: :class:`tencentcloud.dts.v20180330.models.DstInfo`
        :param DatabaseInfo: Source database tables to migrate; [] when migrating the whole instance
        :type DatabaseInfo: str
        :param CreateTime: Task creation (submission) time
        :type CreateTime: str
        :param StartTime: Task start time
        :type StartTime: str
        :param EndTime: Task end time
        :type EndTime: str
        :param Status: Task status: 1 - Creating, 3 - Checking, 4 - CheckPass, 5 - CheckNotPass, 7 - Running, 8 - ReadyComplete, 9 - Success, 10 - Failed, 11 - Stopping, 12 - Completing
        :type Status: int
        :param Detail: Task details
        :type Detail: :class:`tencentcloud.dts.v20180330.models.MigrateDetailInfo`
        :param ErrorInfo: Task error hints; not null or empty when an error occurs
        :type ErrorInfo: list of ErrorInfo
        """
        self.JobId = None
        self.JobName = None
        self.MigrateOption = None
        self.SrcDatabaseType = None
        self.SrcAccessType = None
        self.SrcInfo = None
        self.DstDatabaseType = None
        self.DstAccessType = None
        self.DstInfo = None
        self.DatabaseInfo = None
        self.CreateTime = None
        self.StartTime = None
        self.EndTime = None
        self.Status = None
        self.Detail = None
        self.ErrorInfo = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.JobId = params.get("JobId")
        self.JobName = params.get("JobName")
        if params.get("MigrateOption") is not None:
            self.MigrateOption = MigrateOption()
            self.MigrateOption._deserialize(params.get("MigrateOption"))
        self.SrcDatabaseType = params.get("SrcDatabaseType")
        self.SrcAccessType = params.get("SrcAccessType")
        if params.get("SrcInfo") is not None:
            self.SrcInfo = SrcInfo()
            self.SrcInfo._deserialize(params.get("SrcInfo"))
        self.DstDatabaseType = params.get("DstDatabaseType")
        self.DstAccessType = params.get("DstAccessType")
        if params.get("DstInfo") is not None:
            self.DstInfo = DstInfo()
            self.DstInfo._deserialize(params.get("DstInfo"))
        self.DatabaseInfo = params.get("DatabaseInfo")
        self.CreateTime = params.get("CreateTime")
        self.StartTime = params.get("StartTime")
        self.EndTime = params.get("EndTime")
        self.Status = params.get("Status")
        if params.get("Detail") is not None:
            self.Detail = MigrateDetailInfo()
            self.Detail._deserialize(params.get("Detail"))
        if params.get("ErrorInfo") is not None:
            # Deserialize each error entry into its model.
            self.ErrorInfo = []
            for item in params.get("ErrorInfo"):
                obj = ErrorInfo()
                obj._deserialize(item)
                self.ErrorInfo.append(obj)
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class MigrateOption(AbstractModel):
    """Migration task configuration options.

    """

    def __init__(self):
        """
        :param RunMode: Task run mode: 1 - run immediately, 2 - run at a scheduled time
        :type RunMode: int
        :param ExpectTime: Expected execution time; required when RunMode=2; format: yyyy-mm-dd hh:mm:ss
        :type ExpectTime: str
        :param MigrateType: Data migration type: 1 - structure migration, 2 - full migration, 3 - full + incremental migration
        :type MigrateType: int
        :param MigrateObject: Migration object: 1 - whole instance, 2 - specified databases/tables
        :type MigrateObject: int
        :param ConsistencyType: Sampled data consistency check: 1 - not configured, 2 - full check, 3 - sampled check, 4 - check inconsistent tables only, 5 - no check
        :type ConsistencyType: int
        :param IsOverrideRoot: Whether to overwrite the target database with the source root account: 0 - no, 1 - yes; should be 0 for database/table or structure migration
        :type IsOverrideRoot: int
        :param ExternParams: Extra parameters for different databases, described in JSON format.
Redis accepts parameters such as:
{
	"ClientOutputBufferHardLimit":512,	hard limit of the replica buffer (MB)
	"ClientOutputBufferSoftLimit":512,	soft limit of the replica buffer (MB)
	"ClientOutputBufferPersistTime":60,	soft-limit duration of the replica buffer (seconds)
	"ReplBacklogSize":512,	ring buffer capacity limit (MB)
	"ReplTimeout":120,	replication timeout (seconds)
}
MongoDB accepts parameters such as:
{
	'SrcAuthDatabase':'admin',
	'SrcAuthFlag': "1",
	'SrcAuthMechanism':"SCRAM-SHA-1"
}
MySQL does not currently support extra parameters.
        :type ExternParams: str
        :param ConsistencyParams: Only used for "sampled data consistency check"; required when ConsistencyType is sampled check
        :type ConsistencyParams: :class:`tencentcloud.dts.v20180330.models.ConsistencyParams`
        """
        self.RunMode = None
        self.ExpectTime = None
        self.MigrateType = None
        self.MigrateObject = None
        self.ConsistencyType = None
        self.IsOverrideRoot = None
        self.ExternParams = None
        self.ConsistencyParams = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.RunMode = params.get("RunMode")
        self.ExpectTime = params.get("ExpectTime")
        self.MigrateType = params.get("MigrateType")
        self.MigrateObject = params.get("MigrateObject")
        self.ConsistencyType = params.get("ConsistencyType")
        self.IsOverrideRoot = params.get("IsOverrideRoot")
        self.ExternParams = params.get("ExternParams")
        if params.get("ConsistencyParams") is not None:
            self.ConsistencyParams = ConsistencyParams()
            self.ConsistencyParams._deserialize(params.get("ConsistencyParams"))
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class MigrateStepDetailInfo(AbstractModel):
    """Step information during migration.

    """

    def __init__(self):
        """
        :param StepNo: Step sequence number
        :type StepNo: int
        :param StepName: Step display name
        :type StepName: str
        :param StepId: Step English identifier
        :type StepId: str
        :param Status: Step status: 0 - default, 1 - success, 2 - failed, 3 - running, 4 - not started
        :type Status: int
        :param StartTime: Start time of the current step, in "yyyy-mm-dd hh:mm:ss" format; meaningless when absent or empty
Note: this field may return null, indicating that no valid value was found.
        :type StartTime: str
        """
        self.StepNo = None
        self.StepName = None
        self.StepId = None
        self.Status = None
        self.StartTime = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.StepNo = params.get("StepNo")
        self.StepName = params.get("StepName")
        self.StepId = params.get("StepId")
        self.Status = params.get("Status")
        self.StartTime = params.get("StartTime")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifyMigrateJobRequest(AbstractModel):
    """ModifyMigrateJob request structure.

    """

    def __init__(self):
        """
        :param JobId: ID of the data migration task to modify
        :type JobId: str
        :param JobName: Data migration task name
        :type JobName: str
        :param MigrateOption: Migration task configuration options
        :type MigrateOption: :class:`tencentcloud.dts.v20180330.models.MigrateOption`
        :param SrcAccessType: Source instance access type; values include: extranet (public network), cvm (CVM self-built instance), dcg (Direct Connect), vpncloud (cloud VPN), cdb (TencentDB instance)
        :type SrcAccessType: str
        :param SrcInfo: Source instance information; the exact content depends on the migration task type
        :type SrcInfo: :class:`tencentcloud.dts.v20180330.models.SrcInfo`
        :param DstAccessType: Target instance access type; values include: extranet (public network), cvm (CVM self-built instance), dcg (Direct Connect), vpncloud (cloud VPN), cdb (TencentDB instance). Currently only cdb is supported
        :type DstAccessType: str
        :param DstInfo: Target instance information; the target region cannot be modified
        :type DstInfo: :class:`tencentcloud.dts.v20180330.models.DstInfo`
        :param DatabaseInfo: When migrating "specified databases/tables", the source tables to migrate, described as a JSON-array string, as in the examples below.
For a database-table two-level structure:
[{"Database":"db1","Table":["table1","table2"]},{"Database":"db2"}]
For a database-schema-table three-level structure:
[{"Database":"db1","Schema":"s1","Table":["table1","table2"]},{"Database":"db1","Schema":"s2","Table":["table1","table2"]},{"Database":"db2","Schema":"s1","Table":["table1","table2"]},{"Database":"db3"},{"Database":"db4","Schema":"s1"}]
Not required in "whole instance" migration mode
        :type DatabaseInfo: str
        """
        self.JobId = None
        self.JobName = None
        self.MigrateOption = None
        self.SrcAccessType = None
        self.SrcInfo = None
        self.DstAccessType = None
        self.DstInfo = None
        self.DatabaseInfo = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.JobId = params.get("JobId")
        self.JobName = params.get("JobName")
        if params.get("MigrateOption") is not None:
            self.MigrateOption = MigrateOption()
            self.MigrateOption._deserialize(params.get("MigrateOption"))
        self.SrcAccessType = params.get("SrcAccessType")
        if params.get("SrcInfo") is not None:
            self.SrcInfo = SrcInfo()
            self.SrcInfo._deserialize(params.get("SrcInfo"))
        self.DstAccessType = params.get("DstAccessType")
        if params.get("DstInfo") is not None:
            self.DstInfo = DstInfo()
            self.DstInfo._deserialize(params.get("DstInfo"))
        self.DatabaseInfo = params.get("DatabaseInfo")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifyMigrateJobResponse(AbstractModel):
    """ModifyMigrateJob response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeAutoRenewFlagRequest(AbstractModel):
    """ModifySubscribeAutoRenewFlag request structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Subscription instance ID, e.g. subs-8uey736k
        :type SubscribeId: str
        :param AutoRenewFlag: Auto-renewal flag: 1 - auto-renew, 0 - do not auto-renew
        :type AutoRenewFlag: int
        """
        self.SubscribeId = None
        self.AutoRenewFlag = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        self.AutoRenewFlag = params.get("AutoRenewFlag")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeAutoRenewFlagResponse(AbstractModel):
    """ModifySubscribeAutoRenewFlag response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeConsumeTimeRequest(AbstractModel):
    """ModifySubscribeConsumeTime request structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Data subscription instance ID
        :type SubscribeId: str
        :param ConsumeStartTime: Consumption start time, i.e. the time point from which subscription data starts; format: Y-m-d h:m:s; must be within the past 24 hours
        :type ConsumeStartTime: str
        """
        self.SubscribeId = None
        self.ConsumeStartTime = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        self.ConsumeStartTime = params.get("ConsumeStartTime")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeConsumeTimeResponse(AbstractModel):
    """ModifySubscribeConsumeTime response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeNameRequest(AbstractModel):
    """ModifySubscribeName request structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Data subscription instance ID
        :type SubscribeId: str
        :param SubscribeName: Data subscription instance name; length limit [1,60]
        :type SubscribeName: str
        """
        self.SubscribeId = None
        self.SubscribeName = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        self.SubscribeName = params.get("SubscribeName")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeNameResponse(AbstractModel):
    """ModifySubscribeName response structure.

    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeObjectsRequest(AbstractModel):
    """ModifySubscribeObjects request structure.

    """

    def __init__(self):
        """
        :param SubscribeId: Data subscription instance ID
        :type SubscribeId: str
        :param SubscribeObjectType: Data subscription type; possible values: 0 - whole-instance subscription; 1 - data subscription; 2 - structure subscription; 3 - data + structure subscription
        :type SubscribeObjectType: int
        :param Objects: Subscribed database/table information
        :type Objects: list of SubscribeObject
        """
        self.SubscribeId = None
        self.SubscribeObjectType = None
        self.Objects = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.SubscribeId = params.get("SubscribeId")
        self.SubscribeObjectType = params.get("SubscribeObjectType")
        if params.get("Objects") is not None:
            # Deserialize each subscription object into its model.
            self.Objects = []
            for item in params.get("Objects"):
                obj = SubscribeObject()
                obj._deserialize(item)
                self.Objects.append(obj)
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeObjectsResponse(AbstractModel):
    """ModifySubscribeObjects response structure.

    """

    def __init__(self):
        """
        :param AsyncRequestId: Async task ID
        :type AsyncRequestId: str
        :param RequestId: Unique request ID, returned with every request; provide it when reporting an issue.
        :type RequestId: str
        """
        self.AsyncRequestId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.AsyncRequestId = params.get("AsyncRequestId")
        self.RequestId = params.get("RequestId")
        # Warn about keys that have no matching attribute.
        unused = set(params.keys()) - set(vars(self))
        if unused:
            warnings.warn("%s fields are useless." % ",".join(unused), Warning)
class ModifySubscribeVipVportRequest(AbstractModel):
    """Request body for the ModifySubscribeVipVport API."""

    def __init__(self):
        # SubscribeId (str): ID of the data-subscription instance.
        self.SubscribeId = None
        # DstUniqSubnetId (str): target subnet; DstIp must belong to it if given.
        self.DstUniqSubnetId = None
        # DstIp (str): target IP; at least one of DstIp / DstPort is required.
        self.DstIp = None
        # DstPort (int): target port, range [1025-65535].
        self.DstPort = None

    def _deserialize(self, params):
        # Every field is a plain scalar copied straight from the raw dict.
        for field in ("SubscribeId", "DstUniqSubnetId", "DstIp", "DstPort"):
            setattr(self, field, params.get(field))
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class ModifySubscribeVipVportResponse(AbstractModel):
    """Response body for the ModifySubscribeVipVport API."""

    def __init__(self):
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class ModifySyncJobRequest(AbstractModel):
    """Request body for the ModifySyncJob API."""

    def __init__(self):
        # JobId (str): ID of the disaster-recovery sync task to modify.
        self.JobId = None
        # JobName (str): new task name.
        self.JobName = None
        # SyncOption (SyncOption): task configuration options.
        self.SyncOption = None
        # DatabaseInfo (str): JSON-array description of the source tables when
        # "specified tables" mode is used, e.g.
        # [{"Database":"db1","Table":["table1","table2"]},{"Database":"db2"}]
        self.DatabaseInfo = None

    def _deserialize(self, params):
        self.JobId = params.get("JobId")
        self.JobName = params.get("JobName")
        raw_option = params.get("SyncOption")
        if raw_option is not None:
            self.SyncOption = SyncOption()
            self.SyncOption._deserialize(raw_option)
        self.DatabaseInfo = params.get("DatabaseInfo")
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class ModifySyncJobResponse(AbstractModel):
    """Response body for the ModifySyncJob API."""

    def __init__(self):
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class OfflineIsolatedSubscribeRequest(AbstractModel):
    """Request body for the OfflineIsolatedSubscribe API."""

    def __init__(self):
        # SubscribeId (str): ID of the data-subscription instance.
        self.SubscribeId = None

    def _deserialize(self, params):
        self.SubscribeId = params.get("SubscribeId")
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class OfflineIsolatedSubscribeResponse(AbstractModel):
    """Response body for the OfflineIsolatedSubscribe API."""

    def __init__(self):
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class ResetSubscribeRequest(AbstractModel):
    """Request body for the ResetSubscribe API."""

    def __init__(self):
        # SubscribeId (str): ID of the data-subscription instance.
        self.SubscribeId = None

    def _deserialize(self, params):
        self.SubscribeId = params.get("SubscribeId")
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class ResetSubscribeResponse(AbstractModel):
    """Response body for the ResetSubscribe API."""

    def __init__(self):
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SrcInfo(AbstractModel):
    """Source-instance connection information for a migration task."""

    def __init__(self):
        # AccessKey (str): Alibaba Cloud AccessKey (Aliyun RDS 5.6 sources only).
        self.AccessKey = None
        # Ip (str): instance IP address.
        self.Ip = None
        # Port (int): instance port.
        self.Port = None
        # User (str): instance user name.
        self.User = None
        # Password (str): instance password.
        self.Password = None
        # RdsInstanceId (str): Aliyun RDS instance ID (Aliyun RDS sources).
        self.RdsInstanceId = None
        # CvmInstanceId (str): CVM short ID, e.g. ins-olgl39y8, for self-built
        # CVM-hosted sources.
        self.CvmInstanceId = None
        # UniqDcgId (str): direct-connect gateway ID, e.g. dcg-0rxtqqxb.
        self.UniqDcgId = None
        # VpcId (str): VPC ID, e.g. vpc-92jblxto.
        self.VpcId = None
        # SubnetId (str): subnet ID inside the VPC, e.g. subnet-3paxmkdz.
        self.SubnetId = None
        # UniqVpnGwId (str): VPN gateway ID, e.g. vpngw-9ghexg7q.
        self.UniqVpnGwId = None
        # InstanceId (str): database instance ID, e.g. cdb-powiqx8q.
        self.InstanceId = None
        # Region (str): region name, e.g. ap-guangzhou.
        self.Region = None
        # Supplier (str): "aliyun" for Aliyun RDS instances, otherwise "others".
        self.Supplier = None
        # CcnId (str): Cloud Connect Network ID, e.g. ccn-afp6kltc; may be None.
        self.CcnId = None
        # EngineVersion (str): database version for RDS sources, e.g. "5.6"
        # or "5.7" (defaults to "5.6").
        self.EngineVersion = None

    def _deserialize(self, params):
        # Every field is a plain scalar copied straight from the raw dict.
        for field in ("AccessKey", "Ip", "Port", "User", "Password",
                      "RdsInstanceId", "CvmInstanceId", "UniqDcgId", "VpcId",
                      "SubnetId", "UniqVpnGwId", "InstanceId", "Region",
                      "Supplier", "CcnId", "EngineVersion"):
            setattr(self, field, params.get(field))
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class StartMigrateJobRequest(AbstractModel):
    """Request body for the StartMigrateJob API."""

    def __init__(self):
        # JobId (str): ID of the data-migration task to start.
        self.JobId = None

    def _deserialize(self, params):
        self.JobId = params.get("JobId")
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class StartMigrateJobResponse(AbstractModel):
    """Response body for the StartMigrateJob API."""

    def __init__(self):
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class StartSyncJobRequest(AbstractModel):
    """Request body for the StartSyncJob API."""

    def __init__(self):
        # JobId (str): ID of the disaster-recovery sync task to start.
        self.JobId = None

    def _deserialize(self, params):
        self.JobId = params.get("JobId")
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class StartSyncJobResponse(AbstractModel):
    """Response body for the StartSyncJob API."""

    def __init__(self):
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class StopMigrateJobRequest(AbstractModel):
    """Request body for the StopMigrateJob API."""

    def __init__(self):
        # JobId (str): ID of the data-migration task to stop.
        self.JobId = None

    def _deserialize(self, params):
        self.JobId = params.get("JobId")
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class StopMigrateJobResponse(AbstractModel):
    """Response body for the StopMigrateJob API."""

    def __init__(self):
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SubscribeInfo(AbstractModel):
    """Details of a data-subscription instance."""

    def __init__(self):
        # SubscribeId (str): subscription instance ID.
        self.SubscribeId = None
        # SubscribeName (str): subscription instance name.
        self.SubscribeName = None
        # ChannelId (str): channel bound to the subscription.
        self.ChannelId = None
        # Product (str): product name of the bound database instance.
        self.Product = None
        # InstanceId (str): bound database instance ID.
        self.InstanceId = None
        # InstanceStatus (str): status of the bound database instance.
        self.InstanceStatus = None
        # SubsStatus (str): configuration state: unconfigure / configuring /
        # configured.
        self.SubsStatus = None
        # ModifyTime (str): last modification time.
        self.ModifyTime = None
        # CreateTime (str): creation time.
        self.CreateTime = None
        # IsolateTime (str): isolation time.
        self.IsolateTime = None
        # ExpireTime (str): expiry time.
        self.ExpireTime = None
        # OfflineTime (str): offline time.
        self.OfflineTime = None
        # ConsumeStartTime (str): last modified consumption start point;
        # zero value when never modified.
        self.ConsumeStartTime = None
        # Region (str): region of the subscription instance.
        self.Region = None
        # PayType (int): billing mode, 0 - subscription, 1 - pay-as-you-go.
        self.PayType = None
        # Vip (str): VIP of the subscription instance.
        self.Vip = None
        # Vport (int): port of the subscription instance.
        self.Vport = None
        # UniqVpcId (str): VPC the VIP belongs to.
        self.UniqVpcId = None
        # UniqSubnetId (str): subnet the VIP belongs to.
        self.UniqSubnetId = None
        # Status (str): lifecycle state: creating / normal / isolating /
        # isolated / offlining / offline.
        self.Status = None
        # SdkConsumedTime (str): timestamp of the SDK's last acknowledged
        # message; the current consumption point when the SDK keeps consuming.
        self.SdkConsumedTime = None
        # Tags (list of TagItem): tags; may be None.
        self.Tags = None
        # AutoRenewFlag (int): auto-renew flag, 0 - off, 1 - on; may be None.
        self.AutoRenewFlag = None
        # SubscribeVersion (str): txdts (legacy) or kafka; may be None.
        self.SubscribeVersion = None

    def _deserialize(self, params):
        # Plain scalar fields copied straight from the raw dict.
        for field in ("SubscribeId", "SubscribeName", "ChannelId", "Product",
                      "InstanceId", "InstanceStatus", "SubsStatus",
                      "ModifyTime", "CreateTime", "IsolateTime", "ExpireTime",
                      "OfflineTime", "ConsumeStartTime", "Region", "PayType",
                      "Vip", "Vport", "UniqVpcId", "UniqSubnetId", "Status",
                      "SdkConsumedTime", "AutoRenewFlag", "SubscribeVersion"):
            setattr(self, field, params.get(field))
        raw_tags = params.get("Tags")
        if raw_tags is not None:
            self.Tags = []
            for raw in raw_tags:
                tag = TagItem()
                tag._deserialize(raw)
                self.Tags.append(tag)
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SubscribeObject(AbstractModel):
    """A single subscribed object (database or table)."""

    def __init__(self):
        # ObjectsType (int): 0 - database, 1 - table within a database;
        # may be None.
        self.ObjectsType = None
        # DatabaseName (str): subscribed database name; may be None.
        self.DatabaseName = None
        # TableNames (list of str): subscribed table names; may be None.
        self.TableNames = None

    def _deserialize(self, params):
        self.ObjectsType = params.get("ObjectsType")
        self.DatabaseName = params.get("DatabaseName")
        self.TableNames = params.get("TableNames")
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SubscribeRegionConf(AbstractModel):
    """Per-region availability information for data subscription."""

    def __init__(self):
        # RegionName (str): human-readable region name; may be None.
        self.RegionName = None
        # Region (str): region identifier, e.g. ap-guangzhou; may be None.
        self.Region = None
        # Area (str): area name the region belongs to; may be None.
        self.Area = None
        # IsDefaultRegion (int): 1 if this is the default region, else 0;
        # may be None.
        self.IsDefaultRegion = None
        # Status (int): sale status: 1 - normal, 2 - gray release,
        # 3 - off sale; may be None.
        self.Status = None

    def _deserialize(self, params):
        # Every field is a plain scalar copied straight from the raw dict.
        for field in ("RegionName", "Region", "Area", "IsDefaultRegion",
                      "Status"):
            setattr(self, field, params.get(field))
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SwitchDrToMasterRequest(AbstractModel):
    """Request body for the SwitchDrToMaster API."""

    def __init__(self):
        # DstInfo (SyncInstanceInfo): the disaster-recovery instance.
        self.DstInfo = None
        # DatabaseType (str): database type, e.g. mysql.
        self.DatabaseType = None

    def _deserialize(self, params):
        raw_dst = params.get("DstInfo")
        if raw_dst is not None:
            self.DstInfo = SyncInstanceInfo()
            self.DstInfo._deserialize(raw_dst)
        self.DatabaseType = params.get("DatabaseType")
        # Warn about request fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SwitchDrToMasterResponse(AbstractModel):
    """Response body for the SwitchDrToMaster API."""

    def __init__(self):
        # AsyncRequestId (str): ID of the async background task.
        self.AsyncRequestId = None
        # RequestId (str): unique request ID returned for every call.
        self.RequestId = None

    def _deserialize(self, params):
        self.AsyncRequestId = params.get("AsyncRequestId")
        self.RequestId = params.get("RequestId")
        # Warn about response fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SyncCheckStepInfo(AbstractModel):
    """One verification step of a disaster-recovery sync task."""

    def __init__(self):
        # StepNo (int): step sequence number.
        self.StepNo = None
        # StepName (str): display name of the step.
        self.StepName = None
        # StepCode (int): step result code.
        self.StepCode = None
        # StepMessage (str): step result message.
        self.StepMessage = None

    def _deserialize(self, params):
        # Every field is a plain scalar copied straight from the raw dict.
        for field in ("StepNo", "StepName", "StepCode", "StepMessage"):
            setattr(self, field, params.get(field))
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SyncDetailInfo(AbstractModel):
    """Detailed progress of a sync task."""

    def __init__(self):
        # StepAll (int): total number of steps.
        self.StepAll = None
        # StepNow (int): current step.
        self.StepNow = None
        # Progress (str): overall progress.
        self.Progress = None
        # CurrentStepProgress (str): progress of the current step.
        self.CurrentStepProgress = None
        # MasterSlaveDistance (int): master/replica gap in MB.
        self.MasterSlaveDistance = None
        # SecondsBehindMaster (int): master/replica gap in seconds.
        self.SecondsBehindMaster = None
        # StepInfo (list of SyncStepDetailInfo): per-step details.
        self.StepInfo = None

    def _deserialize(self, params):
        # Plain scalar fields copied straight from the raw dict.
        for field in ("StepAll", "StepNow", "Progress", "CurrentStepProgress",
                      "MasterSlaveDistance", "SecondsBehindMaster"):
            setattr(self, field, params.get(field))
        raw_steps = params.get("StepInfo")
        if raw_steps is not None:
            self.StepInfo = []
            for raw in raw_steps:
                step = SyncStepDetailInfo()
                step._deserialize(raw)
                self.StepInfo.append(step)
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SyncInstanceInfo(AbstractModel):
    """Identifies either side (master or DR instance) of a sync task."""

    def __init__(self):
        # Region (str): region name, e.g. ap-guangzhou.
        self.Region = None
        # InstanceId (str): short instance ID.
        self.InstanceId = None

    def _deserialize(self, params):
        self.Region = params.get("Region")
        self.InstanceId = params.get("InstanceId")
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SyncJobInfo(AbstractModel):
    """Full description of a disaster-recovery sync task."""

    def __init__(self):
        # JobId (str): sync task ID.
        self.JobId = None
        # JobName (str): sync task name.
        self.JobName = None
        # SyncOption (SyncOption): task options.
        self.SyncOption = None
        # SrcAccessType (str): source access type.
        self.SrcAccessType = None
        # SrcDatabaseType (str): source database type.
        self.SrcDatabaseType = None
        # SrcInfo (SyncInstanceInfo): source instance.
        self.SrcInfo = None
        # DstAccessType (str): DR-side access type.
        self.DstAccessType = None
        # DstDatabaseType (str): DR-side database type.
        self.DstDatabaseType = None
        # DstInfo (SyncInstanceInfo): DR instance.
        self.DstInfo = None
        # Detail (SyncDetailInfo): detailed progress.
        self.Detail = None
        # Status (int): task status.
        self.Status = None
        # DatabaseInfo (str): databases/tables being synced.
        self.DatabaseInfo = None
        # CreateTime (str): creation time.
        self.CreateTime = None
        # StartTime (str): start time.
        self.StartTime = None
        # EndTime (str): end time.
        self.EndTime = None

    def _deserialize(self, params):
        # Plain scalar fields copied straight from the raw dict.
        for field in ("JobId", "JobName", "SrcAccessType", "SrcDatabaseType",
                      "DstAccessType", "DstDatabaseType", "Status",
                      "DatabaseInfo", "CreateTime", "StartTime", "EndTime"):
            setattr(self, field, params.get(field))
        # Nested model fields, deserialized recursively when present.
        for field, model in (("SyncOption", SyncOption),
                             ("SrcInfo", SyncInstanceInfo),
                             ("DstInfo", SyncInstanceInfo),
                             ("Detail", SyncDetailInfo)):
            raw = params.get(field)
            if raw is not None:
                nested = model()
                nested._deserialize(raw)
                setattr(self, field, nested)
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SyncOption(AbstractModel):
    """Configuration options of a disaster-recovery sync task."""

    def __init__(self):
        # SyncObject (int): 1 - whole instance, 2 - specified tables.
        self.SyncObject = None
        # RunMode (int): start mode, 1 - start immediately.
        self.RunMode = None
        # SyncType (int): sync mode, 3 - full plus incremental.
        self.SyncType = None
        # ConsistencyType (int): consistency check, 1 - not configured.
        self.ConsistencyType = None

    def _deserialize(self, params):
        # Every field is a plain scalar copied straight from the raw dict.
        for field in ("SyncObject", "RunMode", "SyncType", "ConsistencyType"):
            setattr(self, field, params.get(field))
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SyncStepDetailInfo(AbstractModel):
    """Progress of a single sync-task step."""

    def __init__(self):
        # StepNo (int): step number.
        self.StepNo = None
        # StepName (str): step name.
        self.StepName = None
        # CanStop (int): whether the step can be aborted.
        self.CanStop = None
        # StepId (int): step ID.
        self.StepId = None

    def _deserialize(self, params):
        # Every field is a plain scalar copied straight from the raw dict.
        for field in ("StepNo", "StepName", "CanStop", "StepId"):
            setattr(self, field, params.get(field))
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class TagFilter(AbstractModel):
    """Tag filter used when listing resources."""

    def __init__(self):
        # TagKey (str): tag key.
        self.TagKey = None
        # TagValue (list of str): accepted tag values.
        self.TagValue = None

    def _deserialize(self, params):
        self.TagKey = params.get("TagKey")
        self.TagValue = params.get("TagValue")
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class TagItem(AbstractModel):
    """A single resource tag."""

    def __init__(self):
        # TagKey (str): tag key.
        self.TagKey = None
        # TagValue (str): tag value; may be None.
        self.TagValue = None

    def _deserialize(self, params):
        self.TagKey = params.get("TagKey")
        self.TagValue = params.get("TagValue")
        # Warn about fields this model version does not know about.
        leftover = set(params) - set(vars(self))
        if leftover:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
| 44,150 | 0 | 2,160 |
76c8eb89e8c19b860d88bb86c31cd4003a856411 | 1,412 | py | Python | interview-preparation-kit/dictionaries-and-hashmaps/count-triplets/count-triplets-v1.py | victorwongth/hackerrank-solutions | b3125204e7ff9c83efb2417c7dab3630a6dc7165 | [
"MIT"
] | null | null | null | interview-preparation-kit/dictionaries-and-hashmaps/count-triplets/count-triplets-v1.py | victorwongth/hackerrank-solutions | b3125204e7ff9c83efb2417c7dab3630a6dc7165 | [
"MIT"
] | null | null | null | interview-preparation-kit/dictionaries-and-hashmaps/count-triplets/count-triplets-v1.py | victorwongth/hackerrank-solutions | b3125204e7ff9c83efb2417c7dab3630a6dc7165 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/count-triplets-1/problem
# This solution works but will time out
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
# Complete the countTriplets function below.
if __name__ == '__main__':
    # HackerRank harness: read from stdin, write the answer to OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First input line: n (array length) and r (common ratio).
    nr = input().rstrip().split()
    n = int(nr[0])
    r = int(nr[1])
    # Second input line: the n space-separated array elements.
    arr = list(map(int, input().rstrip().split()))
    ans = countTriplets(arr, r)
    fptr.write(str(ans) + '\n')
    fptr.close()
| 25.672727 | 76 | 0.592068 | # https://www.hackerrank.com/challenges/count-triplets-1/problem
# This solution works but will time out
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
# Complete the countTriplets function below.
def countTriplets(arr, r):
    """Count index triplets (i, j, k), i < j < k, such that
    arr[i], arr[j], arr[k] form a geometric progression with common ratio r.

    Single O(n) pass replacing the original triple-nested index enumeration
    (which the original comment noted would time out): treat each element x
    as the middle term; the triplets it completes are
    (occurrences of x/r strictly before it) * (occurrences of x*r strictly
    after it).

    :param arr: list of positive integers
    :param r: common ratio (positive integer; r == 1 is handled too)
    :return: number of qualifying triplets
    """
    left = defaultdict(int)    # counts of values strictly before position j
    right = defaultdict(int)   # counts of values strictly after position j
    for x in arr:
        right[x] += 1
    triplets = 0
    for x in arr:
        # x now plays the middle role, so it no longer belongs to "right".
        right[x] -= 1
        # A middle term must be divisible by r for x/r to be an integer.
        if x % r == 0:
            triplets += left[x // r] * right[x * r]
        left[x] += 1
    return triplets
if __name__ == '__main__':
    # HackerRank harness: read from stdin, write the answer to OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First input line: n (array length) and r (common ratio).
    nr = input().rstrip().split()
    n = int(nr[0])
    r = int(nr[1])
    # Second input line: the n space-separated array elements.
    arr = list(map(int, input().rstrip().split()))
    ans = countTriplets(arr, r)
    fptr.write(str(ans) + '\n')
    fptr.close()
| 841 | 0 | 22 |
d80dc03d4bc0d8cecdd2b7e8d49958c94ddb8c4d | 52 | py | Python | kitsune/wiki/__init__.py | AndrewDVXI/kitsune | 84bd4fa60346681c3fc5a03b0b1540fd1335cee2 | [
"BSD-3-Clause"
] | 929 | 2015-01-04T08:08:51.000Z | 2022-03-31T06:20:44.000Z | kitsune/wiki/__init__.py | hafixo/kitsune | d7756872e16590eea1c6adaeb5bc78f83414d753 | [
"BSD-3-Clause"
] | 1,751 | 2015-01-02T00:04:37.000Z | 2022-03-31T10:24:30.000Z | kitsune/wiki/__init__.py | Whoerr/kitsune | 2428573b4920a824c3e712b8a4870f8c1ada8f64 | [
"BSD-3-Clause"
] | 605 | 2015-01-01T14:08:36.000Z | 2022-03-28T15:39:45.000Z | default_app_config = "kitsune.wiki.apps.WikiConfig"
| 26 | 51 | 0.826923 | default_app_config = "kitsune.wiki.apps.WikiConfig"
| 0 | 0 | 0 |
d87f10d1aaea2f6d2a753d7e0c53bcd18dac070c | 1,086 | py | Python | database/migrations/0043_auto_20210111_2318.py | ORC-RIS/beiwe-backend | af2c43f79350bf0fc1ce8efafab1ac9c40008c40 | [
"BSD-3-Clause"
] | 51 | 2018-01-12T01:51:44.000Z | 2022-03-10T00:06:34.000Z | database/migrations/0043_auto_20210111_2318.py | ORC-RIS/beiwe-backend | af2c43f79350bf0fc1ce8efafab1ac9c40008c40 | [
"BSD-3-Clause"
] | 213 | 2018-01-10T19:57:03.000Z | 2022-03-29T04:30:53.000Z | database/migrations/0043_auto_20210111_2318.py | ORC-RIS/beiwe-backend | af2c43f79350bf0fc1ce8efafab1ac9c40008c40 | [
"BSD-3-Clause"
] | 35 | 2018-03-08T03:28:53.000Z | 2022-03-11T13:58:44.000Z | # Generated by Django 2.2.14 on 2021-01-11 23:18
from django.db import migrations, models
import django.utils.timezone
| 29.351351 | 93 | 0.607735 | # Generated by Django 2.2.14 on 2021-01-11 23:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add created_on / last_updated audit timestamps to the Intervention
    and InterventionDate models."""

    dependencies = [
        ('database', '0042_auto_20201204_2223'),
    ]

    operations = [
        migrations.AddField(
            model_name='intervention',
            name='created_on',
            # auto_now_add needs a one-off default for existing rows; the
            # migration framework inserted timezone.now, hence
            # preserve_default=False below.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='intervention',
            name='last_updated',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='interventiondate',
            name='created_on',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='interventiondate',
            name='last_updated',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| 0 | 942 | 23 |
86010bad3e726db8bdb9130199a575bb8a5e2ad0 | 1,585 | py | Python | tools/ci_new_boards_check.py | MakeItZone/circuitpython | 7f803c0b51c333210ed267502422ed7bb28b9be7 | [
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | 13 | 2021-01-27T13:12:17.000Z | 2022-02-25T20:16:00.000Z | tools/ci_new_boards_check.py | MakeItZone/circuitpython | 7f803c0b51c333210ed267502422ed7bb28b9be7 | [
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | 1 | 2020-12-30T11:48:24.000Z | 2020-12-30T11:48:24.000Z | tools/ci_new_boards_check.py | MakeItZone/circuitpython | 7f803c0b51c333210ed267502422ed7bb28b9be7 | [
"Unlicense",
"BSD-3-Clause",
"MIT-0",
"MIT"
] | 6 | 2021-05-12T20:45:46.000Z | 2021-12-27T04:51:56.000Z | #! /usr/bin/env python3
# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)
#
# SPDX-License-Identifier: MIT
import sys
import os
import json
import yaml
import build_board_info
# Workflow whose build jobs must cover every board known to the source tree.
workflow_file = '.github/workflows/build.yml'
# Get boards in json format
boards_info_json = build_board_info.get_board_mapping()
# Get all the boards out of the json format; "alias" entries point at another
# board's definition and are not built separately.
info_boards = [board for board in boards_info_json.keys() if not boards_info_json[board].get("alias", False)]
# We need to know the path of the workflow file
base_path = os.path.dirname(__file__)
yml_path = os.path.abspath(os.path.join(base_path, '..', workflow_file))
# Loading board list based on build jobs in the workflow file.
ci_boards = []
with open(yml_path, "r") as f:
    workflow = yaml.safe_load(f)
ok = True
for job in workflow["jobs"]:
    # Only jobs whose name starts with "build" carry a board matrix.
    if not job.startswith("build"):
        continue
    job_boards = workflow["jobs"][job]["strategy"]["matrix"]["board"]
    # Board lists must be kept alphabetical; print the sorted list to paste in.
    if job_boards != sorted(job_boards):
        print("Boards for job \"{}\" not sorted. Must be:".format(job))
        print("      - \"" + "\"\n      - \"".join(sorted(job_boards)) + "\"")
        ok = False
    ci_boards.extend(job_boards)
# Every known board (info_boards) must appear in some build job (ci_boards).
info_boards.sort()
ci_boards.sort()
missing_boards = set(info_boards) - set(ci_boards)
if missing_boards:
    ok = False
    print('Boards missing in {}:'.format(workflow_file))
    for board in missing_boards:
        print(board)
# Non-zero exit status fails the CI check.
if not ok:
    sys.exit(1)
| 28.303571 | 135 | 0.699685 | #! /usr/bin/env python3
# SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)
#
# SPDX-License-Identifier: MIT

# CI sanity check: every board known to build_board_info must be built by some
# "build*" job in the GitHub Actions workflow, and each job's board list must
# be alphabetically sorted.
import sys
import os
import json
import yaml
import build_board_info

workflow_file = '.github/workflows/build.yml'

# Get boards in json format
boards_info_json = build_board_info.get_board_mapping()
# Get all the boards out of the json format; "alias" entries are alternate
# names for an existing board, not separate builds.
info_boards = [board for board in boards_info_json.keys() if not boards_info_json[board].get("alias", False)]

# We need to know the path of the workflow file (one directory up from here).
base_path = os.path.dirname(__file__)
yml_path = os.path.abspath(os.path.join(base_path, '..', workflow_file))

# Loading board list based on build jobs in the workflow file.
ci_boards = []
with open(yml_path, "r") as f:
    workflow = yaml.safe_load(f)

ok = True
for job in workflow["jobs"]:
    # Only "build*" jobs carry a board matrix.
    if not job.startswith("build"):
        continue
    job_boards = workflow["jobs"][job]["strategy"]["matrix"]["board"]
    if job_boards != sorted(job_boards):
        print("Boards for job \"{}\" not sorted. Must be:".format(job))
        print(" - \"" + "\"\n - \"".join(sorted(job_boards)) + "\"")
        ok = False
    ci_boards.extend(job_boards)

# All the travis_boards elements must be on info_boards
info_boards.sort()
ci_boards.sort()
missing_boards = set(info_boards) - set(ci_boards)
if missing_boards:
    ok = False
    print('Boards missing in {}:'.format(workflow_file))
    for board in missing_boards:
        print(board)

# Non-zero exit code fails the CI job.
if not ok:
    sys.exit(1)
| 0 | 0 | 0 |
76c575b41eb793e9db73c54281f26f3309864947 | 599 | py | Python | setup.py | lmarzora/pyspatialite | 173ae3e8a9f2523adbaa2cefb23195f9d32aae18 | [
"Zlib"
] | null | null | null | setup.py | lmarzora/pyspatialite | 173ae3e8a9f2523adbaa2cefb23195f9d32aae18 | [
"Zlib"
] | null | null | null | setup.py | lmarzora/pyspatialite | 173ae3e8a9f2523adbaa2cefb23195f9d32aae18 | [
"Zlib"
] | null | null | null | #! /usr/bin/env python
# Packaging metadata for pyspatialite: a DB-API 2.0 interface to SQLite 3.x
# with the SpatiaLite spatial extension built in.
# NOTE(review): find_packages is imported but unused; packages are listed
# explicitly below.
from setuptools import setup, find_packages
setup(
    name = 'pyspatialite',
    version = '0.2',
    packages=['pyspatialite'],
    description = 'DB-API 2.0 interface for SQLite 3.x with Spatialite',
    author = 'Lokkju Brennr',
    author_email = 'lokkju@lokkju.com',
    license = 'zlib/libpng license',
    platforms = 'ALL',
    url = 'https://github.com/lokkju/pyspatialite/',
    # no download_url, since pypi hosts it!
    #download_url = 'http://code.google.com/p/pyspatialite/downloads/list',
    # Ship prebuilt Windows binaries alongside the Python sources.
    package_data={
        'pyspatialite': ['*.dll', '*.pyd'],
    }
) | 33.277778 | 75 | 0.644407 | #! /usr/bin/env python
from setuptools import setup, find_packages
setup(
name = 'pyspatialite',
version = '0.2',
packages=['pyspatialite'],
description = 'DB-API 2.0 interface for SQLite 3.x with Spatialite',
author = 'Lokkju Brennr',
author_email = 'lokkju@lokkju.com',
license = 'zlib/libpng license',
platforms = 'ALL',
url = 'https://github.com/lokkju/pyspatialite/',
# no download_url, since pypi hosts it!
#download_url = 'http://code.google.com/p/pyspatialite/downloads/list',
package_data={
'pyspatialite': ['*.dll', '*.pyd'],
}
) | 0 | 0 | 0 |
2e156082d1f30393329123b4788d9175bae86f19 | 39,794 | py | Python | src/mpeg1audio/__init__.py | sbrubes/mpeg1audio | 2feb3a53861f623bab56280c06ab1fedf6804f6c | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2016-04-06T04:39:04.000Z | 2020-04-23T22:55:58.000Z | src/mpeg1audio/__init__.py | sbrubes/mpeg1audio | 2feb3a53861f623bab56280c06ab1fedf6804f6c | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/mpeg1audio/__init__.py | sbrubes/mpeg1audio | 2feb3a53861f623bab56280c06ab1fedf6804f6c | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2015-04-30T22:35:47.000Z | 2020-10-03T04:25:30.000Z | """
Python package which is intended to gather all kinds of MPEG-1 Audio related
meta information from file. Such as duration of MPEGAudio file, average bitrate
for variable bitrate (VBR) MPEGAudio files, etc.
Most of the information about MPEGAudio Headers is from excellent article
`MPEGAudio Audio Frame Header By Konrad Windszus, in Code Project
<http://www.codeproject.com/KB/audio-video/mpegaudioinfo.aspx#MPEGAudioFrame>`_.
If you are solely interested on details of MPEGAudio headers that is a good
place to start. Unit tests (:file:`tests/` -directory) are matched against the
MPEGAudioInfo.exe provided in that project.
Notable feature of mpeg1audio is the fact that it :doc:`tries to parse
lazily</laziness>`. It doesn't parse all frames, or ending unless really needed.
.. todo:: Free bitrate, this should be simple to implement, though I haven't yet
found any free bitrate files which to test against.
.. todo:: Table of contents for VBR, this is not high on priority list since we
don't need to seek the MPEGAudio really.
Usage example
-------------
>>> import mpeg1audio
>>> try:
... mp3 = mpeg1audio.MPEGAudio(open('data/song.mp3', 'rb'))
... except mpeg1audio.MPEGAudioHeaderException:
... pass
... else:
... print mp3.duration
0:03:12
Why the exception? It may seem unnecessary, but it has a purpose so that there
cannot be *empty* MPEGAudio instances, those are more infuriating than the
handling of exception.
"""
import os
from mpeg1audio.utils import FileOpener
__version__ = "0.5.5"
__release__ = "0.5.5 alpha"
__copyright__ = "Jari Pennanen, 2010"
__description__ = "MPEG-1 Audio package"
__author__ = "Jari Pennanen"
__license__ = "FreeBSD, see COPYING"
# Pylint disable settings:
# ------------------------
# ToDos, DocStrings:
# pylint: disable-msg=W0511,W0105
#
# Unused variable, argument:
# pylint: disable-msg=W0612,W0613
#
# Re-define built-in:
# pylint: disable-msg=W0622
#
# Protected member access:
# pylint: disable-msg=W0212
#
# Too many lines per module:
# pylint: disable-msg=C0302
#
# Too many instance attributes, Too few public methods:
# pylint: disable-msg=R0902,R0903
#
# TODO: LOW: I don't like the verboseness of EpyDoc syntax, maybe change to
# reStructuredText?
from datetime import timedelta
from mpeg1audio import headers
from mpeg1audio import utils
from headers import MPEGAudioHeaderEOFException, MPEGAudioHeaderException
import math
import struct
__all__ = ['MPEGAudioFrameBase', 'MPEGAudioFrameIterator', 'MPEGAudioFrame',
'MPEGAudio', 'MPEGAudioHeaderException',
'MPEGAudioHeaderEOFException', 'PARSE_ALL_CHUNK_SIZE', 'headers',
'utils', 'vbri', 'xing']
PARSE_ALL_CHUNK_SIZE = 153600
"""Chunk size of parsing all frames.
:type: int"""
class MPEGAudioFrameBase(object):
    """MPEGAudio frame base, should not be instantiated, only inherited.

    Variables defined here are constant through out the frames of
    :class:`MPEGAudio`.
    """
    # NOTE(review): no attribute initializer is visible in this copy of the
    # class; subclasses (e.g. MPEGAudioFrame.parse) assign attributes such as
    # offset, size and _padding_size directly. Presumably the full source
    # defines __init__ here -- confirm against upstream.
class MPEGAudioFrame(MPEGAudioFrameBase):
    """MPEGAudio *Frame* meta data.

    A frame is parsed from a 4-byte (32-bit) MPEG audio frame header; see
    :meth:`parse` for the bit layout actually decoded here.
    """
    def get_forward_iterator(self, file, chunk_size=None):
        """Get forward iterator from this position.

        :param file: File object
        :type file: file object
        :param chunk_size: Chunked reading size, ``None`` defaults to
            :const:`mpeg1audio.utils.DEFAULT_CHUNK_SIZE`.
        :type chunk_size: int
        :return: Generator that iterates forward from this frame.
        :rtype: generator of :class:`MPEGAudioFrame`
        """
        # TODO: LOW: Free bitrate.
        # The next frame header starts immediately after this frame's data.
        next_frame_offset = self.offset + self.size
        chunks = utils.chunked_reader(file, start_position=next_frame_offset,
                                      chunk_size=chunk_size)
        return MPEGAudioFrame.parse_consecutive(next_frame_offset, chunks)
    # def get_backward_iterator(self, file):
    #     # TODO: LOW: Backward iterator
    #     raise NotImplementedError('Backward iteration not implemented!')
    @classmethod
    def find_and_parse(cls, file, max_frames=3, chunk_size=None, #IGNORE:R0913
                       begin_frame_search= -1, lazily_after=1,
                       max_chunks= -1, max_consecutive_chunks= -1):
        """Find and parse from file.

        :param file: File object being searched.
        :type file: file object
        :param max_frames: Maximum of frames returned. Defaults to ``3``.
            ``None`` means give all frames as lazy generator.
        :type max_frames: int, or None
        :param chunk_size: Size of chunked reading. Defaults to
            :const:`utils.DEFAULT_CHUNK_SIZE`, minimum ``4``.
        :type chunk_size: int
        :param begin_frame_search: Begin frame search from this position in
            file. Defaults to ``-1``, meaning continue where file pointer has
            left.
        :type begin_frame_search: int
        :param lazily_after: Check also next header(s), before becoming
            lazy generator. Defaults to ``1``.
        :type lazily_after: int
        :param max_chunks: Maximum amount of chunks the chunked reader can
            yield. ``-1`` means infinity, and can be looped to end of file.
        :type max_chunks: int
        :param max_consecutive_chunks: Maximum of *consecutive* chunks in
            returned lazy generator. ``-1`` means infinity, and can be looped to
            end of file.
        :type max_consecutive_chunks: int
        """
        chunk_size = chunk_size or utils.DEFAULT_CHUNK_SIZE
        # A header is 4 bytes, so never read in smaller units.
        chunk_size = max(chunk_size, 4)
        chunks = utils.chunked_reader(file, chunk_size=chunk_size,
                                      start_position=begin_frame_search,
                                      max_chunks=max_chunks)
        for chunk_offset, chunk in chunks:
            # Every frame sync starts with a 0xFF byte; each candidate is then
            # validated by attempting to parse consecutive frames from it.
            for found in utils.find_all_overlapping(chunk, chr(255)):
                consecutive_chunks = \
                    utils.chunked_reader(file,
                                         chunk_size=chunk_size,
                                         start_position=chunk_offset + found,
                                         max_chunks=max_consecutive_chunks)
                frames = MPEGAudioFrame.parse_consecutive(chunk_offset + found,
                                                          consecutive_chunks)
                try:
                    # genlimit raises ValueError when fewer than
                    # lazily_after + 1 frames parse -- i.e. a false sync.
                    return utils.genlimit(frames, lazily_after + 1, max_frames)
                except ValueError:
                    pass
        # No valid frame sequence found anywhere in the searched chunks.
        return iter([])
    @classmethod
    def parse_consecutive(cls, header_offset, chunks):
        """Parse consecutive MPEGAudio Frame headers.

        Parses from given position until header parsing error, or end of chunks.

        :param header_offset: Header offset *within a file*.
        :type header_offset: int
        :param chunks: Generator yielding more chunks when *End of Chunk* is
            reached.
        :type chunks: generator, or list
        :return: Generator yielding MPEGAudio frames.
        :rtype: generator of :class:`MPEGAudioFrame`
        :see: :func:`utils.chunked_reader()`
        """
        previous_mpegframe = None
        previous_mpegframe_offset = None
        previous_chunk = ""
        next_mpegframe_offset = header_offset
        for next_chunk_offset, next_chunk in chunks:
            # Get 4 bytes from previous chunk, so a header straddling the
            # chunk boundary can still be read in one piece.
            previous_chunk_end = previous_chunk[-4:]
            # Join the 4 bytes, if there were any, to tested chunk
            chunk = previous_chunk_end + next_chunk
            chunk_offset = next_chunk_offset - len(previous_chunk_end)
            # Yield all frames in chunk
            while True:
                if (previous_mpegframe is not None) and \
                   (previous_mpegframe_offset is not None):
                    if previous_mpegframe.size is None:
                        return
                    # TODO: LOW: Free bitrate, you must search for the
                    # second frame.
                    # Next header follows immediately after the previous frame.
                    next_mpegframe_offset = previous_mpegframe_offset + \
                        previous_mpegframe.size
                next_mpegframe = None
                next_header_offset = next_mpegframe_offset - chunk_offset
                # Get header bytes within chunk
                try:
                    header_bytes = headers.get_bytes(next_header_offset, chunk)
                except MPEGAudioHeaderEOFException:
                    # We need next chunk, end of this chunk was reached
                    break
                # Parse and append if parseable
                try:
                    next_mpegframe = MPEGAudioFrame.parse(header_bytes)
                except MPEGAudioHeaderException:
                    # First unparseable header terminates the sequence.
                    return
                else:
                    # Frame was parsed successfully
                    next_mpegframe.offset = next_mpegframe_offset
                    yield next_mpegframe
                previous_mpegframe_offset = next_mpegframe_offset
                previous_mpegframe = next_mpegframe
            previous_chunk = next_chunk
        return
    @classmethod
    def parse(cls, bytes):
        """Tries to create MPEGAudio Frame from given bytes.

        :param bytes: MPEGAudio Header bytes. Usually obtained with
            :func:`headers.get_bytes`
        :type bytes: int
        :rtype: :class:`MPEGAudioFrame`
        :return: MPEGAudio Frame
        :raise headers.MPEGAudioHeaderException: Raised if MPEGAudio Frame
            cannot be parsed.
        """
        # TODO: LOW: CRC, verify and parse.
        # http://www.codeproject.com/KB/audio-video/mpegaudioinfo.aspx#CRC
        # Header synchronization bits: top 11 bits must all be set.
        headers.check_sync_bits((bytes >> 21) & 2047)
        # Header parseable information, extracted by bit position from the
        # 32-bit header word (MSB first).
        mpeg_version_bits = (bytes >> 19) & 3
        layer_bits = (bytes >> 17) & 3
        protection_bit = (bytes >> 16) & 1
        bitrate_bits = (bytes >> 12) & 15
        samplerate_bits = (bytes >> 10) & 3
        padding_bit = (bytes >> 9) & 1
        private_bit = (bytes >> 8) & 1
        mode_bits = (bytes >> 6) & 3
        mode_extension_bits = (bytes >> 4) & 3
        copyright_bit = (bytes >> 3) & 1
        original_bit = (bytes >> 2) & 1
        emphasis_bits = (bytes >> 0) & 3
        self = MPEGAudioFrame()
        # Each headers.get_* lookup raises MPEGAudioHeaderException on a
        # reserved / invalid bit pattern, aborting the parse.
        self.version = headers.get_mpeg_version(mpeg_version_bits)
        self.layer = headers.get_layer(layer_bits)
        self.bitrate = headers.get_bitrate(self.version, self.layer,
                                           bitrate_bits)
        self.sample_rate = headers.get_sample_rate(self.version,
                                                   samplerate_bits)
        self.channel_mode = headers.get_channel_mode(mode_bits)
        self.channel_mode_extension = \
            headers.get_channel_mode_ext(self.layer, mode_extension_bits)
        self.emphasis = headers.get_emphasis(emphasis_bits)
        self._padding_size = padding_bit
        self.is_private = private_bit == 1
        self.is_copyrighted = copyright_bit == 1
        self.is_original = original_bit == 1
        self.is_protected = protection_bit == 1
        # Non-header parseable information
        self.samples_per_frame = headers.get_samples_per_frame(self.version,
                                                               self.layer)
        self.size = headers.get_frame_size(self.version, self.layer,
                                           self.sample_rate, self.bitrate,
                                           self._padding_size)
        return self
class MPEGAudioFrameIterator(object):
    """MPEGAudio Frame iterator, for lazy evaluation."""
    # NOTE(review): this copy of the class defines no __iter__ / __getitem__,
    # yet parse_all() iterates over self and MPEGAudio indexes .frames[0] and
    # .frames[-1] -- the iteration-protocol methods appear to have been
    # stripped from this copy; confirm against the full source.
    def __init__(self, mpeg, begin_frames, end_frames):
        """
        :param mpeg: MPEGAudio Which frames are to be iterated over.
        :type mpeg: :class:`MPEGAudio`
        :param begin_frames: First frames of MPEGAudio.
        :type begin_frames: lambda: [:class:`MPEGAudioFrame`, ...]
        :param end_frames: End frames of MPEGAudio.
        :type end_frames: lambda: [:class:`MPEGAudioFrame`, ...]
        """
        self.mpeg = mpeg
        """MPEGAudio which frames are iterated.
        :type: :class:`MPEGAudio`
        """
        self._begin_frames = begin_frames
        """Begin frames.
        :type: list of :class:`MPEGAudioFrame`
        """
        self._end_frames = end_frames
        """End frames.
        :type: list of :class:`MPEGAudioFrame`, or None
        """
        self._has_parsed_all = False
        """Has parsing all occurred?
        :type: bool
        """
        # Callables are unevaluated thunks; a non-callable means the frames
        # were given eagerly and no parsing is pending.
        self._has_parsed_beginning = not callable(self._begin_frames)
        """Has parsing beginning occurred?
        :type: bool
        """
        self._has_parsed_ending = not callable(self._end_frames)
        """Has parsing end occurred?
        :type: bool
        """
    def parse_all(self, force=False):
        """Parse all frames.

        Walks every frame once, then stores the frame count and average
        bitrate on the owning :class:`MPEGAudio`.

        :see: :func:`MPEGAudio.parse_all`
        """
        # TODO: LOW: How do we deal corrupted MPEGAudio files?
        # Where some frames are misplaced, etc?
        if self._has_parsed_all and not force:
            # TODO: DEBUG!
            raise NotImplementedError('This should not happen, ever!')
            # return
        avg_bitrate = 0
        index = -1
        for index, frame in enumerate(self):
            avg_bitrate += frame.bitrate
        # Close for now
        self.mpeg.close()
        frame_count = index + 1
        bitrate = avg_bitrate / frame_count
        # Set MPEGAudio values
        self.mpeg.frame_count = frame_count
        self.mpeg.bitrate = bitrate
        # Set has parsed all
        self._has_parsed_all = True
    # def __reversed__(self):
    #     # TODO: LOW: Backward iterator
    #     pass
class MPEGAudio(MPEGAudioFrameBase):
    """
    Parses MPEGAudio file meta data.

    Uses Xing and VBRI headers if necessary, for better performance with VBR
    files. VBR files that doesn't have those headers the file must parse all
    frames.
    """
    # Descriptor that re-opens the file on demand; also presumably provides
    # the close() method used below -- NOTE(review): close() is not defined in
    # this copy of the class, confirm it comes from FileOpener.
    _file = FileOpener(mode='rb')
    """Opens the file when needed"""
    def __init__(self, file, begin_start_looking=0, ending_start_looking=0,
                 mpeg_test=True):
        """
        .. todo:: If given filename, create file and close it always automatically
            when not needed.

        :param file: File handle returned e.g. by open(). Alternatively path to
            file which to open on request.
        :type file: file object, or string
        :param begin_start_looking: Start position of MPEGAudio header search.
            For example if you know that file has ID3v2, it is advised to give
            the size of ID3v2 tag to this field.
            Value *must be equal or lesser than* (<=) the beginning of
            MPEGAudio. If the given value exceeds the first header, the given
            MPEGAudio might be incorrect.
        :type begin_start_looking: int
        :param ending_start_looking: End position of MPEGAudio *relative to end
            of file*. For example if you know that file has ID3v1 footer, give
            ``128``, the size of ID3v1, this ensures that we can *at least* skip
            over that.
            Value *must be equal or lesser than* (<=) end of the last
            MPEGAudio header.
        :type ending_start_looking: int
        :param mpeg_test: Do mpeg test first before continuing with parsing the
            beginning. This is useful especially if there is even slight
            possibility that given file is not MPEGAudio, we can rule them out
            fast.
        :type mpeg_test: bool
        :raise headers.MPEGAudioHeaderException: Raised if header cannot be
            found.
        """
        super(MPEGAudio, self).__init__()
        self._filepath = None
        """File path
        type: String, unicode, or :const:`None`
        """
        self._filehandle = None
        """File handle when instantiated using path to file.
        type: File object, or :const:`None`
        """
        # If instantiated using path to file
        if isinstance(file, (str, unicode)):
            self._filepath = file
            # Open the file
            try:
                file = open(file, "rb")
            except (IOError, os.error):
                raise MPEGAudioHeaderException(
                    'File %s cannot be opened' % file)
            self._filehandle = file
        # If instantiated using file object
        else:
            self._file = file
            """File object.
            :type: file object
            """
        self.is_vbr = False
        """Is variable bitrate?
        type: bool
        """
        self.filesize = utils.get_filesize(file)
        """Filesize in bytes.
        :type: int
        """
        self.xing = None
        """XING Header, if any.
        :type: :class:`XING`, or None
        """
        self.vbri = None
        """VBRI Header, if any.
        :type: :class:`VBRI`, or None
        """
        self.frames = None
        """All MPEGAudio frames.
        :type: iterator for :class:`MPEGAudioFrame`
        """
        # Backing fields for the lazy properties below; None means
        # "not computed yet".
        self._frame_count = None
        self._frame_size = None
        self._size = None
        self._duration = None
        self._bitrate = None
        self._begin_start_looking = begin_start_looking
        self._ending_start_looking = ending_start_looking
        test_frames = []
        if mpeg_test:
            test_frames = list(self.is_mpeg_test())
        # Parse beginning of file, when needed. In reality, this is run every
        # time init is run. The set_mpeg_details, XING, VBRI uses the first
        # frames so we cannot make this very lazy.
        begin_frames = lambda: self.parse_beginning(begin_start_looking)
        # Parse ending of file, when needed.
        end_frames = lambda: self.parse_ending(ending_start_looking)
        # Creates frame iterator between begin and end frames.
        self.frames = MPEGAudioFrameIterator(self, begin_frames, end_frames)
        # Set MPEGAudio Details
        self.set_mpeg_details(self.frames[0], test_frames)
        # Parse VBR Headers if can be found.
        self.parse_xing()
        self.parse_vbri()
        # Close for now
        self.close()
    def _get_size(self, parse_all=False, parse_ending=True):
        """MPEGAudio Size getter.

        :rtype: int, or None
        """
        if self._size is not None:
            return self._size
        if parse_ending:
            # 100% accurate size, if parsing ending did indeed return frame from
            # same MPEGAudio:
            self.size = self.frames[-1].offset + self.frames[-1].size - \
                        self.frames[0].offset
        else:
            # TODO: NORMAL: Estimation of size Following might be a good enough
            # for 99% of time, maybe it should be default? A biggest risk is
            # that files with a *huge* footer will yield totally inaccurate
            # values, is that risk too big?
            #
            # Should we choose a higher accuracy over performance with 99% of
            # cases?
            self.size = self.filesize - self._ending_start_looking - \
                        self.frames[0].offset
        # TODO: LOW: parse_all in here is redundant, parse_ending gives 100%
        # accurate.
        if parse_all:
            self.frames.parse_all()
        return self._size
    def _set_size(self, value):
        """MPEGAudio Size setter."""
        self._size = value
    def _get_sample_count(self, parse_all=False, parse_ending=True):
        """Sample count getter.

        :rtype: int, or None
        """
        frame_count = self._get_frame_count(parse_all=parse_all,
                                            parse_ending=parse_ending)
        if frame_count is not None:
            return self.frame_count * self.samples_per_frame
        return None
    def _get_bitrate(self, parse_all=True):
        """Bitrate getter.

        :rtype: int, float, or None
        """
        if self._bitrate is not None:
            return self._bitrate
        # CBR bitrate is set eagerly in set_mpeg_details; only VBR needs
        # computing here.
        if self.is_vbr:
            sample_count = self._get_sample_count(parse_all)
            mpeg_size = self._get_size()
            self.bitrate = headers.get_vbr_bitrate(mpeg_size, sample_count,
                                                   self.sample_rate)
        return self._bitrate
    def _set_bitrate(self, value):
        """Bitrate setter."""
        self._bitrate = value
    def _get_frame_count(self, parse_all=False, parse_ending=True):
        """Frame count getter.

        :rtype: int, or None
        """
        if self._frame_count is not None:
            return self._frame_count
        if not self.is_vbr:
            # CBR
            mpeg_size = self._get_size(parse_all=parse_all,
                                       parse_ending=parse_ending)
            first_frame = self.frames[0]
            unpadded_frame_size = first_frame.size - first_frame._padding_size
            # unpadded_frames = float(self.size) / float(unpadded_frame_size)
            padded_frame_size = unpadded_frame_size + 1
            padded_frames = float(mpeg_size) / float(padded_frame_size)
            # TODO: NORMAL: Estimation of frame_count:
            # it seems to be either this:
            self._frame_count = int(math.ceil(padded_frames))
            # or this:
            #self._frame_count = int(unpadded_frames)
            # now how can we guess which one?
            # print unpadded_frames, padded_frames
            # Average it aint:
            #self._frame_count = int(round((unpadded_frames + padded_frames) / \
            #                   float(2)))
        else:
            # VBR; parse_all() walks every frame and stores the exact count.
            self.frames.parse_all()
            #raise NotImplementedError('Frame count not yet lazy.')
        return self._frame_count
    def _set_frame_count(self, value):
        """Frame count setter."""
        self._frame_count = value
    def _get_frame_size(self, parse_all=True):
        """Frame size getter.

        :rtype: int, or None
        """
        if self._frame_size is not None:
            return self._frame_size
        if not self.is_vbr:
            # CBR: all frames share the first frame's size.
            self.frame_size = self.frames[0].size
        else:
            # VBR: average frame size over the whole stream.
            frame_count = self._get_frame_count()
            mpeg_size = self._get_size()
            self.frame_size = headers.get_vbr_frame_size(mpeg_size, frame_count)
        return self._frame_size
    def _set_frame_size(self, value):
        """Frame size setter."""
        self._frame_size = value
    def _get_duration(self, parse_all=True):
        """Duration getter.

        :rtype: datetime.timedelta, or None
        """
        if self._duration is not None:
            return self._duration
        if not self.is_vbr:
            # CBR
            sample_count = self._get_sample_count(parse_all=False,
                                                  parse_ending=True)
            if sample_count is not None:
                self.duration = \
                    headers.get_duration_from_sample_count(sample_count,
                                                           self.sample_rate)
            # mpeg_size = self._get_size()
            # bitrate = self._get_bitrate(parse_all)
            # if (bitrate is not None) and (mpeg_size is not None):
            #     self.duration = \
            #         _get_duration_from_size_bitrate(mpeg_size=self.size,
            #                                         bitrate=self.bitrate)
        else:
            # VBR
            sample_count = self._get_sample_count(parse_all)
            if sample_count is not None:
                self.duration = \
                    headers.get_duration_from_sample_count(sample_count,
                                                           self.sample_rate)
        return self._duration
    def _set_duration(self, value):
        """Duration setter."""
        self._duration = value
    size = property(_get_size, _set_size)
    """MPEGAudio Size in bytes.

    .. note::

        May start parsing of :func:`all frames<MPEGAudio.parse_all>`,
        or :func:`ending frames<MPEGAudio.parse_ending>`.

    :type: int
    """
    sample_count = property(_get_sample_count)
    """Count of samples in MPEGAudio.

    .. note:: May start parsing of all frames.

    :type: int
    """
    frame_size = property(_get_frame_size, _set_frame_size)
    """Frame size in bytes.

    For VBR files this is *average frame size*.

    .. note:: May start parsing of all frames.

    :type: int
    """
    bitrate = property(_get_bitrate, _set_bitrate)
    """Bitrate of the *file* in kilobits per second, for example 192.

    For VBR files this is *average bitrate* returned as ``float``.

    .. note:: May start parsing of all frames.

    :type: int, or float
    """
    frame_count = property(_get_frame_count, _set_frame_count)
    """Count of frames in MPEGAudio.

    .. note:: May start parsing of all frames.

    :type: int
    """
    duration = property(_get_duration, _set_duration)
    """Duration.

    .. note:: May start parsing of all frames.

    :type: datetime.timedelta
    """
    def parse_xing(self):
        """Tries to parse and set XING from first mpeg frame.

        Parse failure is not an error: the file simply has no XING header.

        :see: :class:`XING`
        """
        from xing import XING, XINGHeaderException
        try:
            self.xing = XING.find_and_parse(self._file, self.frames[0].offset)
        except XINGHeaderException:
            pass
        else:
            VBRHeader.set_mpeg(self, self.xing)
    def parse_vbri(self):
        """Tries to parse and set VBRI from first mpeg frame.

        Parse failure is not an error: the file simply has no VBRI header.

        :see: :class:`VBRI`
        """
        from vbri import VBRI, VBRIHeaderException
        try:
            self.vbri = VBRI.find_and_parse(self._file, self.frames[0].offset)
        except VBRIHeaderException:
            pass
        else:
            VBRHeader.set_mpeg(self, self.vbri)
    def is_mpeg_test(self, test_position=None):
        """Test that the file is MPEGAudio.

        Validates that from middle of the file we can find three valid
        consecutive MPEGAudio frames.

        :raise headers.MPEGAudioHeaderException: Raised if MPEGAudio frames
            cannot be found.
        :return: List of test MPEGAudio frames.
        :rtype: list
        """
        # The absolute theoretical maximum frame size is 2881 bytes:
        # MPEGAudio 2.5 Layer II, 8000 Hz @ 160 kbps, with a padding slot.
        #
        # To get three consecutive headers we need (in bytes):
        # (Max Frame Size + Header Size) * (Amount of consecutive frames + 1)
        #
        # This calculation yields (2881+4)*4 = 11 540, which I decided to round
        # to (2^14 = 16 384)
        # TODO: LOW: Some people use random position in the middle, but why?
        #
        # If test position is not given explicitly it is assumed to be at the
        # middle of "start" and "end" of looking.
        if test_position is None:
            looking_length = self.filesize - self._ending_start_looking - \
                             self._begin_start_looking
            test_position = self._begin_start_looking + \
                            int(0.5 * looking_length)
        try:
            return utils.genmin(MPEGAudioFrame.find_and_parse(file=self._file,
                                              max_frames=3,
                                              chunk_size=16384,
                                              begin_frame_search=test_position,
                                              lazily_after=2,
                                              max_chunks=1),
                                3)
        except ValueError:
            raise MPEGAudioHeaderException("MPEG Test is not passed, "
                                           "file might not be MPEG?")
    def set_mpeg_details(self, first_mpegframe, mpegframes):
        """Sets details of *this* MPEGAudio from the given frames.

        Idea here is that usually one or multiple mpeg frames represents single
        MPEGAudio file with good probability, only if the file is VBR this fails.

        :param first_mpegframe: First MPEGAudio frame of the file.
        :type first_mpegframe: :class:`MPEGAudioFrame`
        :param mpegframes: List of MPEGAudio frames, order and position does not
            matter, only thing matters are the fact they are from same
            MPEGAudio. These are used determine the VBR status of the file.
        :type mpegframes: [:class:`MPEGAudioFrame`, ...]
        """
        # Copy values of MPEGAudio Frame to MPEGAudio, where applicable.
        self.is_copyrighted = first_mpegframe.is_copyrighted
        self.is_original = first_mpegframe.is_original
        self.is_private = first_mpegframe.is_private
        self.is_protected = first_mpegframe.is_protected
        self.offset = first_mpegframe.offset
        self.channel_mode = first_mpegframe.channel_mode
        self.channel_mode_extension = first_mpegframe.channel_mode_extension
        self.emphasis = first_mpegframe.emphasis
        self.sample_rate = first_mpegframe.sample_rate
        self.samples_per_frame = first_mpegframe.samples_per_frame
        self.frame_size = first_mpegframe.size
        self.bitrate = first_mpegframe.bitrate
        # If no testing frames was given, resort to getting last three frames.
        if len(mpegframes) == 0:
            mpegframes = self.frames[-3:]
        # If any of the bitrates differ, this is most likely VBR.
        self.is_vbr = any(mpegframe.bitrate != first_mpegframe.bitrate \
                          for mpegframe in mpegframes)
        if self.is_vbr:
            # Per-frame values are meaningless for VBR; reset so the lazy
            # getters recompute them as averages.
            self.bitrate = None
            self.frame_size = None
            self.frame_count = None
    def parse_all(self, force=False):
        """Parse all frames.

        You should not need to call this, the initialization of
        :class:`MPEGAudio`, or getters does this automatically.

        By parsing all frames, MPEGAudio is ensured to populate following fields
        with *accurate values*:

        - ``frame_count``
        - ``bitrate``

        Essentially all properties, and variables of MPEGAudio should be as
        accurate as possible after running this.

        :param force: Force re-parsing all frames. Defaults to ``False``.
        :type force: bool
        """
        # Semantically, I think, only frames should have parse_all() only, thus
        # this MPEGAudio.parse_all() exists purely because user of this API
        # should not need to guess the "extra" semantics of frames and
        # MPEGAudio.
        self.frames.parse_all(force=force)
    def parse_beginning(self, begin_offset=0, max_frames=6):
        """Parse beginning of MPEGAudio.

        :param begin_offset: Beginning offset, from beginning of file.
        :type begin_offset: int
        :param max_frames: Maximum of frames to be parsed, and returned
            forward from first found frame. ``-1`` means *infinity*, and can be
            looped to end of file.
        :type max_frames: int
        :return: List of MPEGAudio frames.
        :rtype: [:class:`MPEGAudioFrame`, ...]
        :raise headers.MPEGAudioHeaderException: Raised if no frames was
            found. This should not happen if :class:`MPEGAudio.is_mpeg_test` has
            passed.
        """
        try:
            # genmin raises ValueError if fewer than one frame was found.
            return utils.genmin(\
                     MPEGAudioFrame.find_and_parse(file=self._file,
                                               max_frames=max_frames,
                                               begin_frame_search=begin_offset),
                     1)
        except ValueError:
            raise MPEGAudioHeaderEOFException(
                        "There is not enough frames in this file.")
    def parse_ending(self, end_offset=0, min_frames=3, rewind_offset=4000):
        """Parse ending of MPEGAudio.

        You should not need to call this, the initialization of
        :class:`MPEGAudio`, or getters does this automatically.

        .. note::

            Performance wisely the max_frames argument would be useless, and is
            not implemented. As this method must try recursively find_and_parse
            further and further from the ending until minimum of frames is met.
            This might take a long time for files that does not have frames.

        :param end_offset: End offset as relative to *end of file*, if you
            know the *size of footers*, give that.
        :type end_offset: int
        :param min_frames: Minimum amount of frames from the end of file.
        :type min_frames: int
        :param rewind_offset: When minimum is not met, rewind the offset
            this much and retry. Defaults to ``4000``.
        :type rewind_offset: int
        :return: List of MPEGAudio frames, amount of items is variable.
        :rtype: [:class:`MPEGAudioFrame`, ...]
        :raise headers.MPEGAudioHeaderEOFException: Raised if whole file does
            not include any frames. This should not happen if
            :func:`MPEGAudio.is_mpeg_test` has passed.
        """
        # min_frames is always positive:
        min_frames = max(min_frames, 1)
        begin_frame_search = self.filesize - end_offset
        end_frames = []
        while True:
            # Oh noes, not enough frames.
            if len(end_frames) < min_frames:
                # Step backwards and re-scan from the earlier position.
                begin_frame_search -= rewind_offset
                # Retry from backwards...
                end_frames = \
                    list(MPEGAudioFrame.find_and_parse(\
                            file=self._file,
                            max_frames=None,
                            begin_frame_search=begin_frame_search))
                if begin_frame_search < 0 and len(end_frames) < min_frames:
                    raise MPEGAudioHeaderException(
                                        'Not enough frames was found')
            else:
                return end_frames
class VBRHeader(object):
    """Base class for VBR (variable bitrate) headers such as XING and VBRI."""
    @classmethod
    def set_mpeg(cls, mpeg, vbr):
        """Copy frame count and stream size from a VBR header onto an MPEGAudio.

        Fields the VBR header does not provide (``None``) leave the
        corresponding attribute of *mpeg* untouched.

        :param mpeg: MPEGAudio instance to update.
        :type mpeg: :class:`MPEGAudio`
        :param vbr: Parsed VBR header to read from.
        :type vbr: :class:`VBRHeader`
        """
        frame_count = vbr.frame_count
        mpeg_size = vbr.mpeg_size
        if frame_count is not None:
            mpeg.frame_count = frame_count
        if mpeg_size is not None:
            mpeg.size = mpeg_size
| 33.217028 | 83 | 0.569659 | """
Python package which is intended to gather all kinds of MPEG-1 Audio related
meta information from file. Such as duration of MPEGAudio file, average bitrate
for variable bitrate (VBR) MPEGAudio files, etc.
Most of the information about MPEGAudio Headers is from excellent article
`MPEGAudio Audio Frame Header By Konrad Windszus, in Code Project
<http://www.codeproject.com/KB/audio-video/mpegaudioinfo.aspx#MPEGAudioFrame>`_.
If you are solely interested on details of MPEGAudio headers that is a good
place to start. Unit tests (:file:`tests/` -directory) are matched against the
MPEGAudioInfo.exe provided in that project.
Notable feature of mpeg1audio is the fact that it :doc:`tries to parse
lazily</laziness>`. It doesn't parse all frames, or ending unless really needed.
.. todo:: Free bitrate, this should be simple to implement, though I haven't yet
found any free bitrate files which to test against.
.. todo:: Table of contents for VBR, this is not high on priority list since we
don't need to seek the MPEGAudio really.
Usage example
-------------
>>> import mpeg1audio
>>> try:
... mp3 = mpeg1audio.MPEGAudio(open('data/song.mp3', 'rb'))
... except mpeg1audio.MPEGAudioHeaderException:
... pass
... else:
... print mp3.duration
0:03:12
Why the exception? It may seem unnecessary, but it has a purpose so that there
cannot be *empty* MPEGAudio instances, those are more infuriating than the
handling of exception.
"""
import os
from mpeg1audio.utils import FileOpener
__version__ = "0.5.5"
__release__ = "0.5.5 alpha"
__copyright__ = "Jari Pennanen, 2010"
__description__ = "MPEG-1 Audio package"
__author__ = "Jari Pennanen"
__license__ = "FreeBSD, see COPYING"
# Pylint disable settings:
# ------------------------
# ToDos, DocStrings:
# pylint: disable-msg=W0511,W0105
#
# Unused variable, argument:
# pylint: disable-msg=W0612,W0613
#
# Re-define built-in:
# pylint: disable-msg=W0622
#
# Protected member access:
# pylint: disable-msg=W0212
#
# Too many lines per module:
# pylint: disable-msg=C0302
#
# Too many instance attributes, Too few public methods:
# pylint: disable-msg=R0902,R0903
#
# TODO: LOW: I don't like the verboseness of EpyDoc syntax, maybe change to
# reStructuredText?
from datetime import timedelta
from mpeg1audio import headers
from mpeg1audio import utils
from headers import MPEGAudioHeaderEOFException, MPEGAudioHeaderException
import math
import struct
__all__ = ['MPEGAudioFrameBase', 'MPEGAudioFrameIterator', 'MPEGAudioFrame',
'MPEGAudio', 'MPEGAudioHeaderException',
'MPEGAudioHeaderEOFException', 'PARSE_ALL_CHUNK_SIZE', 'headers',
'utils', 'vbri', 'xing']
PARSE_ALL_CHUNK_SIZE = 153600
"""Chunk size of parsing all frames.
:type: int"""
class MPEGAudioFrameBase(object):
    """Common base of MPEGAudio frames; should not be instated directly.

    Carries the fields that stay constant throughout the frames of one
    :class:`MPEGAudio` stream.
    """
    def __init__(self):
        # --- Format description, filled in by the header parser ---
        self.version = None
        """MPEGAudio Version.

        :type: string
        """

        self.layer = None
        """Layer number.

        :type: string
        """

        self.sample_rate = None
        """Sampling rate in Hz.

        :type: int
        """

        self.channel_mode = None
        """Channel mode.

        :type: string
        """

        self.channel_mode_extension = None
        """Channel mode extension.

        :type: string
        """

        self.emphasis = None
        """Emphasis.

        :type: string
        """

        self.samples_per_frame = None
        """Samples per frame.

        :type: int
        """

        # --- Header flag bits ---
        self.is_protected = False
        """Is protected?

        :type: bool
        """

        self.is_private = False
        """Is private?

        :type: bool
        """

        self.is_copyrighted = False
        """Is copyrighted?

        :type: bool
        """

        self.is_original = False
        """Is original?

        :type: bool
        """

        # --- Position related ---
        self._padding_size = 0
        """Padding size of header.

        :type: int"""

        self.offset = None
        """Offset of the MPEGAudio Frame header *in file*.

        .. note::

            Offset points to *beginning of header's first byte*, and is *not*
            offset of beginning of data.

        :type: int
        """
class MPEGAudioFrame(MPEGAudioFrameBase):
    """MPEGAudio *Frame* meta data."""
    def __init__(self):
        super(MPEGAudioFrame, self).__init__()

        self.bitrate = None
        """Bitrate in kilobits, for example 192.

        In the MPEGAudio audio standard there is a :term:`free bitrate` format
        described. This free format means that the file is encoded with a
        constant bitrate, which is not one of the predefined bitrates. Only very
        few decoders can handle those files.

        :type: int
        """

        self.samples_per_frame = None
        """Samples per frame.

        :type: int
        """

        self.size = None
        """Frame size in bytes.

        .. note:: Includes the header (4) bytes.

        .. note::

            Beware when the bitrate is ``0`` for :term:`free bitrate`
            frames, the value is ``None``.

        :type: int, or None
        """

    def get_forward_iterator(self, file, chunk_size=None):
        """Get forward iterator from this position.

        :param file: File object
        :type file: file object

        :param chunk_size: Chunked reading size, ``None`` defaults to
            :const:`mpeg1audio.utils.DEFAULT_CHUNK_SIZE`.
        :type chunk_size: int

        :return: Generator that iterates forward from this frame.
        :rtype: generator of :class:`MPEGAudioFrame`
        """
        # TODO: LOW: Free bitrate.
        # ``size`` includes the 4 header bytes, so offset + size is the file
        # position of the *next* frame's header.
        next_frame_offset = self.offset + self.size
        chunks = utils.chunked_reader(file, start_position=next_frame_offset,
                                      chunk_size=chunk_size)
        return MPEGAudioFrame.parse_consecutive(next_frame_offset, chunks)

#    def get_backward_iterator(self, file):
#        # TODO: LOW: Backward iterator
#        raise NotImplementedError('Backward iteration not implemented!')

    @classmethod
    def find_and_parse(cls, file, max_frames=3, chunk_size=None, #IGNORE:R0913
                       begin_frame_search= -1, lazily_after=1,
                       max_chunks= -1, max_consecutive_chunks= -1):
        """Find and parse from file.

        :param file: File object being searched.
        :type file: file object

        :param max_frames: Maximum of frames returned. Defaults to ``3``.
            ``None`` means give all frames as lazy generator.
        :type max_frames: int, or None

        :param chunk_size: Size of chunked reading. Defaults to
            :const:`utils.DEFAULT_CHUNK_SIZE`, minimum ``4``.
        :type chunk_size: int

        :param begin_frame_search: Begin frame search from this position in
            file. Defaults to ``-1``, meaning continue where file pointer has
            left.
        :type begin_frame_search: int

        :param lazily_after: Check also next header(s), before becoming
            lazy generator. Defaults to ``1``.
        :type lazily_after: int

        :param max_chunks: Maximum amount of chunks the chunked reader can
            yield. ``-1`` means infinity, and can be looped to end of file.
        :type max_chunks: int

        :param max_consecutive_chunks: Maximum of *consecutive* chunks in
            returned lazy generator. ``-1`` means infinity, and can be looped to
            end of file.
        :type max_consecutive_chunks: int
        """
        chunk_size = chunk_size or utils.DEFAULT_CHUNK_SIZE
        # A header is 4 bytes; chunks smaller than that could never contain one.
        chunk_size = max(chunk_size, 4)

        chunks = utils.chunked_reader(file, chunk_size=chunk_size,
                                      start_position=begin_frame_search,
                                      max_chunks=max_chunks)

        for chunk_offset, chunk in chunks:
            # Every 0xFF byte is a candidate header start (sync byte); try to
            # parse consecutive frames from each candidate position.
            for found in utils.find_all_overlapping(chunk, chr(255)):
                consecutive_chunks = \
                    utils.chunked_reader(file,
                                         chunk_size=chunk_size,
                                         start_position=chunk_offset + found,
                                         max_chunks=max_consecutive_chunks)
                frames = MPEGAudioFrame.parse_consecutive(chunk_offset + found,
                                                          consecutive_chunks)
                try:
                    # genlimit raises ValueError when fewer than
                    # ``lazily_after + 1`` frames could be parsed; then this
                    # candidate was a false positive and the scan continues.
                    return utils.genlimit(frames, lazily_after + 1, max_frames)
                except ValueError:
                    pass
        return iter([])

    @classmethod
    def parse_consecutive(cls, header_offset, chunks):
        """Parse consecutive MPEGAudio Frame headers.

        Parses from given position until header parsing error, or end of chunks.

        :param header_offset: Header offset *within a file*.
        :type header_offset: int

        :param chunks: Generator yielding more chunks when *End of Chunk* is
            reached.
        :type chunks: generator, or list

        :return: Generator yielding MPEGAudio frames.
        :rtype: generator of :class:`MPEGFrame`

        :see: :func:`utils.chunked_reader()`
        """
        previous_mpegframe = None
        previous_mpegframe_offset = None
        previous_chunk = ""
        next_mpegframe_offset = header_offset

        for next_chunk_offset, next_chunk in chunks:
            # Get 4 bytes from previous chunk
            # (a header may straddle a chunk boundary, so the tail of the
            # previous chunk is stitched in front of the next one)
            previous_chunk_end = previous_chunk[-4:]

            # Join the 4 bytes, if there were any, to tested chunk
            chunk = previous_chunk_end + next_chunk
            chunk_offset = next_chunk_offset - len(previous_chunk_end)

            # Yield all frames in chunk
            while True:
                if (previous_mpegframe is not None) and \
                   (previous_mpegframe_offset is not None):
                    if previous_mpegframe.size is None:
                        # Free-bitrate frame: size unknown, cannot continue.
                        return
                        # TODO: LOW: Free bitrate, you must search for the
                        # second frame.
                    next_mpegframe_offset = previous_mpegframe_offset + \
                                            previous_mpegframe.size
                next_mpegframe = None
                next_header_offset = next_mpegframe_offset - chunk_offset

                # Get header bytes within chunk
                try:
                    header_bytes = headers.get_bytes(next_header_offset, chunk)
                except MPEGAudioHeaderEOFException:
                    # We need next chunk, end of this chunk was reached
                    break

                # Parse and append if parseable
                try:
                    next_mpegframe = MPEGAudioFrame.parse(header_bytes)
                except MPEGAudioHeaderException:
                    # First unparseable header ends the consecutive run.
                    return
                else:
                    # Frame was parsed successfully
                    next_mpegframe.offset = next_mpegframe_offset
                    yield next_mpegframe
                previous_mpegframe_offset = next_mpegframe_offset
                previous_mpegframe = next_mpegframe
            previous_chunk = next_chunk
        return

    @classmethod
    def parse(cls, bytes):
        """Tries to create MPEGAudio Frame from given bytes.

        :param bytes: MPEGAudio Header bytes. Usually obtained with
            :func:`headers.get_bytes`
        :type bytes: int

        :rtype: :class:`MPEGAudioFrame`
        :return: MPEGAudio Frame

        :raise headers.MPEGAudioHeaderException: Raised if MPEGAudio Frame
            cannot be parsed.
        """
        # TODO: LOW: CRC, verify and parse.
        # http://www.codeproject.com/KB/audio-video/mpegaudioinfo.aspx#CRC

        # Header synchronization bits: the top 11 bits (mask 2047 = 0x7FF)
        # of the 32-bit header are validated by the headers module.
        headers.check_sync_bits((bytes >> 21) & 2047)

        # Header parseable information -- raw bit fields of the 32-bit
        # header, extracted highest bits first.
        mpeg_version_bits = (bytes >> 19) & 3
        layer_bits = (bytes >> 17) & 3
        protection_bit = (bytes >> 16) & 1
        bitrate_bits = (bytes >> 12) & 15
        samplerate_bits = (bytes >> 10) & 3
        padding_bit = (bytes >> 9) & 1
        private_bit = (bytes >> 8) & 1
        mode_bits = (bytes >> 6) & 3
        mode_extension_bits = (bytes >> 4) & 3
        copyright_bit = (bytes >> 3) & 1
        original_bit = (bytes >> 2) & 1
        emphasis_bits = (bytes >> 0) & 3

        self = MPEGAudioFrame()

        # Decode the raw bit fields via the headers lookup helpers
        # (presumably these raise MPEGAudioHeaderException for reserved /
        # invalid values -- see the headers module).
        self.version = headers.get_mpeg_version(mpeg_version_bits)
        self.layer = headers.get_layer(layer_bits)
        self.bitrate = headers.get_bitrate(self.version, self.layer,
                                           bitrate_bits)
        self.sample_rate = headers.get_sample_rate(self.version,
                                                   samplerate_bits)
        self.channel_mode = headers.get_channel_mode(mode_bits)
        self.channel_mode_extension = \
            headers.get_channel_mode_ext(self.layer, mode_extension_bits)
        self.emphasis = headers.get_emphasis(emphasis_bits)

        self._padding_size = padding_bit
        self.is_private = private_bit == 1
        self.is_copyrighted = copyright_bit == 1
        self.is_original = original_bit == 1
        self.is_protected = protection_bit == 1

        # Non-header parseable information
        self.samples_per_frame = headers.get_samples_per_frame(self.version,
                                                               self.layer)
        self.size = headers.get_frame_size(self.version, self.layer,
                                           self.sample_rate, self.bitrate,
                                           self._padding_size)
        return self
class MPEGAudioFrameIterator(object):
    """MPEGAudio Frame iterator, for lazy evaluation."""
    def __init__(self, mpeg, begin_frames, end_frames):
        """
        :param mpeg: MPEGAudio Which frames are to be iterated over.
        :type mpeg: :class:`MPEGAudio`

        :param begin_frames: First frames of MPEGAudio.
        :type begin_frames: lambda: [:class:`MPEGAudioFrame`, ...]

        :param end_frames: End frames of MPEGAudio.
        :type end_frames: lambda: [:class:`MPEGAudioFrame`, ...]
        """
        self.mpeg = mpeg
        """MPEGAudio which frames are iterated.

        :type: :class:`MPEGAudio`
        """

        self._begin_frames = begin_frames
        """Begin frames.

        :type: list of :class:`MPEGAudioFrame`
        """

        self._end_frames = end_frames
        """End frames.

        :type: list of :class:`MPEGAudioFrame`, or None
        """

        self._has_parsed_all = False
        """Has parsing all occurred?

        :type: bool
        """

        # ``begin_frames`` / ``end_frames`` may be given either as lazy
        # callables or as pre-computed lists; callables are resolved on
        # first access in __getitem__.
        self._has_parsed_beginning = not callable(self._begin_frames)
        """Has parsing beginning occurred?

        :type: bool
        """

        self._has_parsed_ending = not callable(self._end_frames)
        """Has parsing end occurred?

        :type: bool
        """

    def __len__(self):
        """Return the total frame count, parsing all frames when needed.

        :rtype: int
        """
        # BUGFIX: This used to be ``pass``, which made ``len(...)`` raise a
        # confusing TypeError (``__len__`` returned None). Compute the count
        # lazily instead.
        if not self._has_parsed_all:
            self.parse_all()
        return self.mpeg.frame_count

    def parse_all(self, force=False):
        """Parse all frames.

        :param force: Force re-parsing all frames. Defaults to ``False``.
        :type force: bool

        :see: :func:`MPEGAudio.parse_all`
        """
        # TODO: LOW: How do we deal corrupted MPEGAudio files?
        # Where some frames are misplaced, etc?
        if self._has_parsed_all and not force:
            # BUGFIX: Re-entry used to raise a leftover debug
            # NotImplementedError; parsing twice is simply a no-op unless
            # explicitly forced.
            return

        avg_bitrate = 0
        index = -1
        for index, frame in enumerate(self):
            avg_bitrate += frame.bitrate

        # Close for now
        self.mpeg.close()

        frame_count = index + 1
        if frame_count == 0:
            # Guard against ZeroDivisionError on a (pathological) frameless
            # stream; report it as a header problem instead.
            raise MPEGAudioHeaderException('parse_all() found no frames')
        bitrate = avg_bitrate / frame_count

        # Set MPEGAudio values
        self.mpeg.frame_count = frame_count
        self.mpeg.bitrate = bitrate

        # Set has parsed all
        self._has_parsed_all = True

#    def __reversed__(self):
#        # TODO: LOW: Backward iterator
#        pass

    def __iter__(self):
        """Iterate over all frames, begin frames first.

        :rtype: iterator of :class:`MPEGAudioFrame`
        """
        # Join begin frames, and generator yielding next frames from that on.
        # TODO: ASSUMPTION: Iterating frames uses parsing all chunk size.
        return utils.join_iterators(\
                self._begin_frames,
                self._begin_frames[-1].\
                    get_forward_iterator(self.mpeg._file,
                                         chunk_size=PARSE_ALL_CHUNK_SIZE))

    def __getitem__(self, key):
        """Index or slice the frames.

        Negative indices (and slices with a negative start) address the
        lazily parsed *end* frames; everything else addresses the lazily
        parsed *begin* frames.
        """
        # TODO: LOW: Following is misleading, _begin_frames and _end_frames does
        # not include all keys, works for now.
        if isinstance(key, slice):
            # BUGFIX: slices used to fall into the ``key < 0`` comparison,
            # which silently selected the begin frames on Python 2 and raises
            # TypeError on Python 3. Route ending slices (negative start,
            # e.g. ``frames[-3:]`` used by set_mpeg_details) to the end
            # frames, as documented.
            if (key.start is not None) and (key.start < 0):
                # Lazy evaluate
                if callable(self._end_frames):
                    self._end_frames = list(self._end_frames())
                    self._has_parsed_ending = True
                return self._end_frames[key]
            # Lazy evaluate
            if callable(self._begin_frames):
                self._begin_frames = list(self._begin_frames())
                self._has_parsed_beginning = True
            return self._begin_frames[key]
        if key < 0:
            # Lazy evaluate
            if callable(self._end_frames):
                self._end_frames = list(self._end_frames())
                self._has_parsed_ending = True
            return self._end_frames[key]
        else:
            # Lazy evaluate
            if callable(self._begin_frames):
                self._begin_frames = list(self._begin_frames())
                self._has_parsed_beginning = True
            return self._begin_frames[key]
class MPEGAudio(MPEGAudioFrameBase):
    """
    Parses MPEGAudio file meta data.

    Uses Xing and VBRI headers if neccessary, for better performance with VBR
    files. VBR files that doesn't have those headers the file must parse all
    frames.
    """
    # Class-level descriptor; when __init__ receives a path it stays in
    # effect, when __init__ receives a file object it is shadowed by an
    # instance attribute (see __init__).
    _file = FileOpener(mode='rb')
    """Opens the file when needed"""

    def __init__(self, file, begin_start_looking=0, ending_start_looking=0,
                 mpeg_test=True):
        """
        .. todo:: If given filename, create file and close it always automatically
            when not needed.

        :param file: File handle returned e.g. by open(). Alternatively path to
            file which to open on request.
        :type file: file object, or string

        :param begin_start_looking: Start position of MPEGAudio header search.
            For example if you know that file has ID3v2, it is adviced to give
            the size of ID3v2 tag to this field.

            Value *must be equal or lesser than* (<=) the beginning of
            MPEGAudio. If the given value exceeds the first header, the given
            MPEGAudio might be incorrect.
        :type begin_start_looking: int

        :param ending_start_looking: End position of MPEGAudio *relative to end
            of file*. For example if you know that file has ID3v1 footer, give
            ``128``, the size of ID3v1, this ensures that we can *at least* skip
            over that.

            Value *must be equal or lesser than* (<=) end of the last
            MPEGAudio header.
        :type ending_start_looking: int

        :param mpeg_test: Do mpeg test first before continuing with parsing the
            beginning. This is useful especially if there is even slight
            possibility that given file is not MPEGAudio, we can rule them out
            fast.
        :type mpeg_test: bool

        :raise headers.MPEGAudioHeaderException: Raised if header cannot be
            found.
        """
        super(MPEGAudio, self).__init__()

        self._filepath = None
        """File path

        type: String, unicode, or :const:`None`
        """

        self._filehandle = None
        """File handle when instiated using path to file.

        type: File object, or :const:`None`
        """

        # If instiated using path to file
        # (NOTE: ``unicode`` -- this module targets Python 2)
        if isinstance(file, (str, unicode)):
            self._filepath = file
            # Open the file
            try:
                file = open(file, "rb")
            except (IOError, os.error):
                raise MPEGAudioHeaderException(
                        'File %s cannot be opened' % file)
            self._filehandle = file
        # If instiated using file object
        else:
            self._file = file
            """File object.

            :type: file object
            """

        self.is_vbr = False
        """Is variable bitrate?

        type: bool
        """

        self.filesize = utils.get_filesize(file)
        """Filesize in bytes.

        :type: int
        """

        self.xing = None
        """XING Header, if any.

        :type: :class:`XING`, or None
        """

        self.vbri = None
        """VBRI Header, if any.

        :type: :class:`VBRI`, or None
        """

        self.frames = None
        """All MPEGAudio frames.

        :type: iterator for :class:`MPEGAudioFrame`
        """

        # Lazily computed values; populated by the property getters below.
        self._frame_count = None
        self._frame_size = None
        self._size = None
        self._duration = None
        self._bitrate = None
        self._begin_start_looking = begin_start_looking
        self._ending_start_looking = ending_start_looking

        test_frames = []
        if mpeg_test:
            test_frames = list(self.is_mpeg_test())

        # Parse beginning of file, when needed. In reality, this is run every
        # time init is run. The set_mpeg_details, XING, VBRI uses the first
        # frames so we cannot make this very lazy.
        begin_frames = lambda: self.parse_beginning(begin_start_looking)

        # Parse ending of file, when needed.
        end_frames = lambda: self.parse_ending(ending_start_looking)

        # Creates frame iterator between begin and end frames.
        self.frames = MPEGAudioFrameIterator(self, begin_frames, end_frames)

        # Set MPEGAudio Details
        self.set_mpeg_details(self.frames[0], test_frames)

        # Parse VBR Headers if can be found.
        self.parse_xing()
        self.parse_vbri()

        # Close for now
        self.close()

    def close(self):
        """Close the file handle, but only when it was opened by this
        instance (a path was given to __init__); caller-supplied file
        objects are left open."""
        if self._filehandle:
            self._filehandle.close()

    def _get_size(self, parse_all=False, parse_ending=True):
        """MPEGAudio Size getter.

        :rtype: int, or None
        """
        if self._size is not None:
            return self._size

        if parse_ending:
            # 100% accurate size, if parsing ending did indeed return frame from
            # same MPEGAudio:
            self.size = self.frames[-1].offset + self.frames[-1].size - \
                        self.frames[0].offset
        else:
            # TODO: NORMAL: Estimation of size Following might be a good enough
            # for 99% of time, maybe it should be default? A biggest risk is
            # that files with a *huge* footer will yield totally inaccurate
            # values, is that risk too big?
            #
            # Should we choose a higher accuracy over performance with 99% of
            # cases?
            self.size = self.filesize - self._ending_start_looking - \
                        self.frames[0].offset

        # TODO: LOW: parse_all in here is redundant, parse_ending gives 100%
        # accurate.
        if parse_all:
            self.frames.parse_all()

        return self._size

    def _set_size(self, value):
        """MPEGAudio Size setter."""
        self._size = value

    def _get_sample_count(self, parse_all=False, parse_ending=True):
        """Sample count getter.

        :rtype: int, or None
        """
        frame_count = self._get_frame_count(parse_all=parse_all,
                                            parse_ending=parse_ending)
        if frame_count is not None:
            # NOTE(review): goes through the ``frame_count`` property instead
            # of the local computed above -- equivalent, but re-runs the
            # getter; the local would do.
            return self.frame_count * self.samples_per_frame
        return None

    def _get_bitrate(self, parse_all=True):
        """Bitrate getter.

        :rtype: int, float, or None
        """
        if self._bitrate is not None:
            return self._bitrate
        if self.is_vbr:
            # Average bitrate of a VBR file is derived from total size and
            # sample count.
            sample_count = self._get_sample_count(parse_all)
            mpeg_size = self._get_size()
            self.bitrate = headers.get_vbr_bitrate(mpeg_size, sample_count,
                                                   self.sample_rate)
        return self._bitrate

    def _set_bitrate(self, value):
        """Bitrate setter."""
        self._bitrate = value

    def _get_frame_count(self, parse_all=False, parse_ending=True):
        """Frame count getter.

        :rtype: int, or None
        """
        if self._frame_count is not None:
            return self._frame_count

        if not self.is_vbr:
            # CBR
            mpeg_size = self._get_size(parse_all=parse_all,
                                       parse_ending=parse_ending)
            first_frame = self.frames[0]
            unpadded_frame_size = first_frame.size - first_frame._padding_size
#            unpadded_frames = float(self.size) / float(unpadded_frame_size)
            padded_frame_size = unpadded_frame_size + 1
            padded_frames = float(mpeg_size) / float(padded_frame_size)

            # TODO: NORMAL: Estimation of frame_count:
            # it seems to be either this:
            self._frame_count = int(math.ceil(padded_frames))
            # or this:
            #self._frame_count = int(unpadded_frames)
            # now how can we guess which one?
            # print unpadded_frames, padded_frames

            # Average it aint:
            #self._frame_count = int(round((unpadded_frames + padded_frames) / \
            #                    float(2)))
        else:
            # VBR
            # No shortcut possible: every frame must be parsed and counted.
            self.frames.parse_all()
            #raise NotImplementedError('Frame count not yet lazy.')
        return self._frame_count

    def _set_frame_count(self, value):
        """Frame count setter."""
        self._frame_count = value

    def _get_frame_size(self, parse_all=True):
        """Frame size getter.

        :rtype: int, or None
        """
        if self._frame_size is not None:
            return self._frame_size

        if not self.is_vbr:
            # CBR: every frame has the first frame's size.
            self.frame_size = self.frames[0].size
        else:
            # VBR: average frame size over the whole stream.
            frame_count = self._get_frame_count()
            mpeg_size = self._get_size()
            self.frame_size = headers.get_vbr_frame_size(mpeg_size, frame_count)

        return self._frame_size

    def _set_frame_size(self, value):
        """Frame size setter."""
        self._frame_size = value

    def _get_duration(self, parse_all=True):
        """Duration getter.

        :rtype: datetime.timedelta, or None
        """
        if self._duration is not None:
            return self._duration

        if not self.is_vbr:
            # CBR
            sample_count = self._get_sample_count(parse_all=False,
                                                  parse_ending=True)
            if sample_count is not None:
                self.duration = \
                    headers.get_duration_from_sample_count(sample_count,
                                                           self.sample_rate)
#            mpeg_size = self._get_size()
#            bitrate = self._get_bitrate(parse_all)
#            if (bitrate is not None) and (mpeg_size is not None):
#                self.duration = \
#                    _get_duration_from_size_bitrate(mpeg_size=self.size,
#                                                    bitrate=self.bitrate)
        else:
            # VBR
            sample_count = self._get_sample_count(parse_all)
            if sample_count is not None:
                self.duration = \
                    headers.get_duration_from_sample_count(sample_count,
                                                           self.sample_rate)
        return self._duration

    def _set_duration(self, value):
        """Duration setter."""
        self._duration = value

    size = property(_get_size, _set_size)
    """MPEGAudio Size in bytes.

    .. note::

        May start parsing of :func:`all frames<MPEGAudio.parse_all>`,
        or :func:`ending frames<MPEGAudio.parse_ending>`.

    :type: int
    """

    sample_count = property(_get_sample_count)
    """Count of samples in MPEGAudio.

    .. note:: May start parsing of all frames.

    :type: int
    """

    frame_size = property(_get_frame_size, _set_frame_size)
    """Frame size in bytes.

    For VBR files this is *average frame size*.

    .. note:: May start parsing of all frames.

    :type: int
    """

    bitrate = property(_get_bitrate, _set_bitrate)
    """Bitrate of the *file* in kilobits per second, for example 192.

    For VBR files this is *average bitrate* returned as ``float``.

    .. note:: May start parsing of all frames.

    :type: int, or float
    """

    frame_count = property(_get_frame_count, _set_frame_count)
    """Count of frames in MPEGAudio.

    .. note:: May start parsing of all frames.

    :type: int
    """

    duration = property(_get_duration, _set_duration)
    """Duration.

    .. note:: May start parsing of all frames.

    :type: datetime.timedelta
    """

    def parse_xing(self):
        """Tries to parse and set XING from first mpeg frame.

        :see: :class:`XING`
        """
        # Import here avoids a circular import at module load time --
        # TODO confirm; xing imports from this package.
        from xing import XING, XINGHeaderException
        try:
            self.xing = XING.find_and_parse(self._file, self.frames[0].offset)
        except XINGHeaderException:
            # Absence of a XING header is perfectly normal (CBR files).
            pass
        else:
            VBRHeader.set_mpeg(self, self.xing)

    def parse_vbri(self):
        """Tries to parse and set VBRI from first mpeg frame.

        :see: :class:`VBRI`
        """
        from vbri import VBRI, VBRIHeaderException
        try:
            self.vbri = VBRI.find_and_parse(self._file, self.frames[0].offset)
        except VBRIHeaderException:
            # Absence of a VBRI header is perfectly normal.
            pass
        else:
            VBRHeader.set_mpeg(self, self.vbri)

    def is_mpeg_test(self, test_position=None):
        """Test that the file is MPEGAudio.

        Validates that from middle of the file we can find three valid
        consecutive MPEGAudio frames.

        :raise headers.MPEGAudioHeaderException: Raised if MPEGAudio frames
            cannot be found.

        :return: List of test MPEGAudio frames.
        :rtype: list
        """
        # The absolute theoretical maximum frame size is 2881 bytes:
        # MPEGAudio 2.5 Layer II, 8000 Hz @ 160 kbps, with a padding slot.
        #
        # To get three consecutive headers we need (in bytes):
        # (Max Frame Size + Header Size) * (Amount of consecutive frames + 1)
        #
        # This calculation yields (2881+4)*4 = 11 540, which I decided to round
        # to (2^14 = 16 384)

        # TODO: LOW: Some people use random position in the middle, but why?
        #
        # If test position is not given explicitely it is assumed to be at the
        # middle of "start" and "end" of looking.
        if test_position is None:
            looking_length = self.filesize - self._ending_start_looking - \
                             self._begin_start_looking
            test_position = self._begin_start_looking + \
                            int(0.5 * looking_length)

        try:
            # genmin raises ValueError when fewer than 3 frames were found.
            return utils.genmin(MPEGAudioFrame.find_and_parse(file=self._file,
                                        max_frames=3,
                                        chunk_size=16384,
                                        begin_frame_search=test_position,
                                        lazily_after=2,
                                        max_chunks=1),
                                3)
        except ValueError:
            raise MPEGAudioHeaderException("MPEG Test is not passed, "
                                           "file might not be MPEG?")

    def set_mpeg_details(self, first_mpegframe, mpegframes):
        """Sets details of *this* MPEGAudio from the given frames.

        Idea here is that usually one or multiple mpeg frames represents single
        MPEGAudio file with good probability, only if the file is VBR this fails.

        :param first_mpegframe: First MPEGAudio frame of the file.
        :type first_mpegframe: :class:`MPEGAudioFrame`

        :param mpegframes: List of MPEGAudio frames, order and position does not
            matter, only thing matters are the fact they are from same
            MPEGAudio. These are used determine the VBR status of the file.
        :type mpegframes: [:class:`MPEGAudioFrame`, ...]
        """
        # Copy values of MPEGAudio Frame to MPEGAudio, where applicable.
        self.is_copyrighted = first_mpegframe.is_copyrighted
        self.is_original = first_mpegframe.is_original
        self.is_private = first_mpegframe.is_private
        self.is_protected = first_mpegframe.is_protected
        self.offset = first_mpegframe.offset
        self.channel_mode = first_mpegframe.channel_mode
        self.channel_mode_extension = first_mpegframe.channel_mode_extension
        self.emphasis = first_mpegframe.emphasis
        self.sample_rate = first_mpegframe.sample_rate
        self.samples_per_frame = first_mpegframe.samples_per_frame
        self.frame_size = first_mpegframe.size
        self.bitrate = first_mpegframe.bitrate

        # If no testing frames was given, resort to getting last three frames.
        if len(mpegframes) == 0:
            mpegframes = self.frames[-3:]

        # If any of the bitrates differ, this is most likely VBR.
        self.is_vbr = any(mpegframe.bitrate != first_mpegframe.bitrate \
                          for mpegframe in mpegframes)

        if self.is_vbr:
            # CBR-derived values above do not hold for VBR; reset so the
            # lazy getters recompute them.
            self.bitrate = None
            self.frame_size = None
            self.frame_count = None

    def parse_all(self, force=False):
        """Parse all frames.

        You should not need to call this, the initialization of
        :class:`MPEGAudio`, or getters does this automatically.

        By parsing all frames, MPEGAudio is ensured to populate following fields
        with *accurate values*:

            - ``frame_count``
            - ``bitrate``

        Essentially all properties, and variables of MPEGAudio should be as
        accurate as possible after running this.

        :param force: Force re-parsing all frames. Defaults to ``False``.
        :type force: bool
        """
        # Semantically, I think, only frames should have parse_all() only, thus
        # this MPEGAudio.parse_all() exists purely because user of this API
        # should not need to guess the "extra" semantics of frames and
        # MPEGAudio.
        self.frames.parse_all(force=force)

    def parse_beginning(self, begin_offset=0, max_frames=6):
        """Parse beginning of MPEGAudio.

        :param begin_offset: Beginning offset, from beginning of file.
        :type begin_offset: int

        :param max_frames: Maximum of frames to be parsed, and returned
            forward from first found frame. ``-1`` means *infinity*, and can be
            looped to end of file.
        :type max_frames: int

        :return: List of MPEGAudio frames.
        :rtype: [:class:`MPEGAudioFrame`, ...]

        :raise headers.MPEGAudioHeaderException: Raised if no frames was
            found. This should not happen if :class:`MPEGAudio.is_mpeg_test` has
            passed.
        """
        try:
            # genmin raises ValueError when not even one frame was found.
            return utils.genmin(\
                    MPEGAudioFrame.find_and_parse(file=self._file,
                                                  max_frames=max_frames,
                                                  begin_frame_search=begin_offset),
                    1)
        except ValueError:
            raise MPEGAudioHeaderEOFException(
                    "There is not enough frames in this file.")

    def parse_ending(self, end_offset=0, min_frames=3, rewind_offset=4000):
        """Parse ending of MPEGAudio.

        You should not need to call this, the initialization of
        :class:`MPEGAudio`, or getters does this automatically.

        .. note::

            Performance wisely the max_frames argument would be useless, and is
            not implemented. As this method must try recursively find_and_parse
            further and further from the ending until minimum of frames is met.
            This might take a long time for files that does not have frames.

        :param end_offset: End offset as relative to *end of file*, if you
            know the *size of footers*, give that.
        :type end_offset: int

        :param min_frames: Minimum amount of frames from the end of file.
        :type min_frames: int

        :param rewind_offset: When minimum is not met, rewind the offset
            this much and retry. Defaults to ``4000``.
        :type rewind_offset: int

        :return: List of MPEGAudio frames, amount of items is variable.
        :rtype: [:class:`MPEGAudioFrame`, ...]

        :raise headers.MPEGAudioHeaderEOFException: Raised if whole file does
            not include any frames. This should not happen if
            :func:`MPEGAudio.is_mpeg_test` has passed.
        """
        # min_frames is always positive:
        min_frames = max(min_frames, 1)

        begin_frame_search = self.filesize - end_offset
        end_frames = []

        while True:
            # Oh noes, not enough frames.
            if len(end_frames) < min_frames:
                # Step backwards from the end of file until enough frames
                # are found (or the start of file is passed).
                begin_frame_search -= rewind_offset
                # Retry from backwards...
                end_frames = \
                    list(MPEGAudioFrame.find_and_parse(\
                            file=self._file,
                            max_frames=None,
                            begin_frame_search=begin_frame_search))
                if begin_frame_search < 0 and len(end_frames) < min_frames:
                    raise MPEGAudioHeaderException(
                            'Not enough frames was found')
            else:
                return end_frames
class VBRHeader(object):
    """Common base of VBR headers (XING / VBRI)."""

    def __init__(self):
        self.offset = 0
        """Position of this header within the file.

        :type: int"""

        self.size = 0
        """Byte size of this header within the file.

        :type: int"""

        self.frame_count = None
        """Frame count of the MPEGAudio stream, when the header provides it.

        :type: int, or None"""

        self.mpeg_size = None
        """Size of the MPEGAudio stream in bytes, when the header provides it.

        :type: int, or None"""

        self.quality = None
        """VBR encoding quality.

        :type: int, or None
        """

    @classmethod
    def set_mpeg(cls, mpeg, vbr):
        """Copy the optional values of a VBR header onto an MPEGAudio.

        :param mpeg: MPEGAudio to be updated.
        :type mpeg: :class:`MPEGAudio`

        :param vbr: VBR header the values are taken from.
        :type vbr: :class:`VBRHeader`
        """
        frame_count = vbr.frame_count
        mpeg_size = vbr.mpeg_size
        # Only overwrite values the header actually provided.
        if frame_count is not None:
            mpeg.frame_count = frame_count
        if mpeg_size is not None:
            mpeg.size = mpeg_size
| 4,025 | 0 | 188 |
c796e4aedea9165d4abb0e19c96c41b69a14c690 | 1,762 | py | Python | lithium/manage/commands/elements.py | PressLabs/lithium | a222e4021aabcbec0fd24bcecf904a0ee7ec852d | [
"Apache-2.0"
] | 2 | 2015-03-20T10:57:14.000Z | 2015-03-20T11:03:39.000Z | lithium/manage/commands/elements.py | PressLabs/lithium | a222e4021aabcbec0fd24bcecf904a0ee7ec852d | [
"Apache-2.0"
] | null | null | null | lithium/manage/commands/elements.py | PressLabs/lithium | a222e4021aabcbec0fd24bcecf904a0ee7ec852d | [
"Apache-2.0"
] | null | null | null | all = [
' hydrogen',
' helium',
' lithium',
' beryllium',
' boron',
' carbon',
' nitrogen',
' oxygen',
' fluorine',
' neon',
' sodium',
' magnesium',
' aluminum',
' silicon',
' phosphorus',
' sulfur',
' chlorine',
' argon',
' potassium',
' calcium',
' scandium',
' titanium',
' vanadium',
' chromium',
' manganese',
' iron',
' cobalt',
' nickel',
' copper',
' zinc',
' gallium',
' germanium',
' arsenic',
' selenium',
' bromine',
' krypton',
' rubidium',
' strontium',
' yttrium',
' zirconium',
' niobium',
' molybdenum',
' technetium',
' ruthenium',
' rhodium',
' palladium',
' silver',
' cadmium',
' indium',
' tin',
' antimony',
' tellurium',
' iodine',
' xenon',
' cesium',
' barium',
' lanthanum',
' cerium',
' praseodymium',
' neodymium',
' promethium',
' samarium',
' europium',
' gadolinium',
' terbium',
' dysprosium',
' holmium',
' erbium',
' thulium',
' ytterbium',
' lutetium',
' hafnium',
' tantalum',
' tungsten',
' rhenium',
' osmium',
' iridium',
' platinum',
' gold',
' mercury',
' thallium',
' lead',
' bismuth',
' polonium',
' astatine',
' radon',
' francium',
' radium',
' actinium',
' thorium',
' protactinium',
' uranium',
' neptunium',
' plutonium',
' americium',
' curium',
' berkelium',
' californium',
' einsteinium',
' fermium',
' mendelevium',
' nobelium',
' lawrencium',
' rutherfordium',
' dubnium',
' seaborgium',
' bohrium',
' hassium',
' meitnerium',
' darmstadtium',
' roentgenium',
' copernicium',
' ununtrium',
' flerovium',
' ununpentium',
' livermorium',
' ununseptium',
' ununoctium',
]
| 14.561983 | 19 | 0.527242 | all = [
' hydrogen',
' helium',
' lithium',
' beryllium',
' boron',
' carbon',
' nitrogen',
' oxygen',
' fluorine',
' neon',
' sodium',
' magnesium',
' aluminum',
' silicon',
' phosphorus',
' sulfur',
' chlorine',
' argon',
' potassium',
' calcium',
' scandium',
' titanium',
' vanadium',
' chromium',
' manganese',
' iron',
' cobalt',
' nickel',
' copper',
' zinc',
' gallium',
' germanium',
' arsenic',
' selenium',
' bromine',
' krypton',
' rubidium',
' strontium',
' yttrium',
' zirconium',
' niobium',
' molybdenum',
' technetium',
' ruthenium',
' rhodium',
' palladium',
' silver',
' cadmium',
' indium',
' tin',
' antimony',
' tellurium',
' iodine',
' xenon',
' cesium',
' barium',
' lanthanum',
' cerium',
' praseodymium',
' neodymium',
' promethium',
' samarium',
' europium',
' gadolinium',
' terbium',
' dysprosium',
' holmium',
' erbium',
' thulium',
' ytterbium',
' lutetium',
' hafnium',
' tantalum',
' tungsten',
' rhenium',
' osmium',
' iridium',
' platinum',
' gold',
' mercury',
' thallium',
' lead',
' bismuth',
' polonium',
' astatine',
' radon',
' francium',
' radium',
' actinium',
' thorium',
' protactinium',
' uranium',
' neptunium',
' plutonium',
' americium',
' curium',
' berkelium',
' californium',
' einsteinium',
' fermium',
' mendelevium',
' nobelium',
' lawrencium',
' rutherfordium',
' dubnium',
' seaborgium',
' bohrium',
' hassium',
' meitnerium',
' darmstadtium',
' roentgenium',
' copernicium',
' ununtrium',
' flerovium',
' ununpentium',
' livermorium',
' ununseptium',
' ununoctium',
]
| 0 | 0 | 0 |
e1612bd5463c81961855ff2aaea65396b1ed23f7 | 1,183 | py | Python | setup.py | barlehmann/project_spring_2020 | 7bb2611e51c11d893540dbe1e3439d7406d87fba | [
"Apache-2.0"
] | null | null | null | setup.py | barlehmann/project_spring_2020 | 7bb2611e51c11d893540dbe1e3439d7406d87fba | [
"Apache-2.0"
] | null | null | null | setup.py | barlehmann/project_spring_2020 | 7bb2611e51c11d893540dbe1e3439d7406d87fba | [
"Apache-2.0"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="EEG/GSR Wellness Tracker",
version="0.0.1",
author="Bar Lehmann",
author_email="bar.lehmann@gmail.com",
description="A package to help explore connections between EEG/GSR with self reported measures of Mood and Focus using the MNE API",
long_description= "A package to facilitate easy introductory use of MNE Python and simple statistical analyses of EEG biometrics with self reported mood and focus ratings. Short records of about (10 seconds) can be used as entries, and GSR data can be added if possible/desired. Basic means of analyses of such connections between EEG/GSR with self reported measures of Mood and Focus are provided in the package. This package uses the MNE API",
long_description_content_type="text/markdown",
url="https://github.com/barlehmann/EEG_GSR_Wellness_Tracker",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 40.793103 | 449 | 0.725275 |
# Packaging script for the EEG/GSR Wellness Tracker distribution.
import setuptools
with open("README.md", "r") as fh:
    # NOTE(review): this value is never passed to setup() -- the hard-coded
    # long_description string below shadows it. Confirm which text is intended.
    long_description = fh.read()
setuptools.setup(
    # NOTE(review): "/" and spaces are not valid in a distribution name
    # (PEP 508 naming rules); consider e.g. "eeg-gsr-wellness-tracker".
    name="EEG/GSR Wellness Tracker",
    version="0.0.1",
    author="Bar Lehmann",
    author_email="bar.lehmann@gmail.com",
    description="A package to help explore connections between EEG/GSR with self reported measures of Mood and Focus using the MNE API",
    long_description= "A package to facilitate easy introductory use of MNE Python and simple statistical analyses of EEG biometrics with self reported mood and focus ratings. Short records of about (10 seconds) can be used as entries, and GSR data can be added if possible/desired. Basic means of analyses of such connections between EEG/GSR with self reported measures of Mood and Focus are provided in the package. This package uses the MNE API",
    long_description_content_type="text/markdown",
    url="https://github.com/barlehmann/EEG_GSR_Wellness_Tracker",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 0 | 0 | 0 |
4bf620e1943b76b856bc1c75ba75cc33660fcf13 | 485 | py | Python | 3rd_week/02_selection_sort.py | ysong0504/algorithm | 76bfef16bd5bdc13b7ad260ee5f4e9dc62316403 | [
"MIT"
] | null | null | null | 3rd_week/02_selection_sort.py | ysong0504/algorithm | 76bfef16bd5bdc13b7ad260ee5f4e9dc62316403 | [
"MIT"
] | null | null | null | 3rd_week/02_selection_sort.py | ysong0504/algorithm | 76bfef16bd5bdc13b7ad260ee5f4e9dc62316403 | [
"MIT"
] | null | null | null | input = [4, 6, 2, 9, 1]
print(selection_sort(input))
# print(input) # [1, 2, 4, 6, 9] 가 되어야 합니다!
| 25.526316 | 88 | 0.536082 | input = [4, 6, 2, 9, 1]
def selection_sort(array):
    """Sort ``array`` in place in ascending order and return it.

    Classic selection sort, O(N^2) comparisons: each pass finds the index
    of the minimum of the unsorted suffix and swaps it into position.  The
    original version swapped on *every* smaller element it encountered,
    doing up to O(N) swaps per pass; this does at most one swap per pass
    while producing the same sorted result.
    """
    n = len(array)
    # Once the first n-1 positions are fixed, the last element is already
    # in place, so the outer loop stops at n-1.
    for i in range(n - 1):
        min_index = i
        for j in range(i + 1, n):
            if array[j] < array[min_index]:
                min_index = j
        if min_index != i:
            array[i], array[min_index] = array[min_index], array[i]
    return array
print(selection_sort(input))
# print(input) # [1, 2, 4, 6, 9] 가 되어야 합니다!
| 541 | 0 | 23 |
04dcff38b12edaf5393625191a5c85d55ed8498b | 403 | py | Python | oo/Carro.py | luan-gomes/python-basic-exercises | 213844b421b27ab3e9c09be24d4efb37cc6fce08 | [
"MIT"
] | null | null | null | oo/Carro.py | luan-gomes/python-basic-exercises | 213844b421b27ab3e9c09be24d4efb37cc6fce08 | [
"MIT"
] | null | null | null | oo/Carro.py | luan-gomes/python-basic-exercises | 213844b421b27ab3e9c09be24d4efb37cc6fce08 | [
"MIT"
] | null | null | null |
carro = Carro("Chevrolet", "Tracker", "Branco")
carro.ligar()
carro.desligar()
carro.exibirInformacoes()
| 18.318182 | 47 | 0.687345 |
class Carro:
    """A car described by its make (marca), model (modelo) and color (cor),
    with simple console actions for starting, stopping and display."""

    def __init__(self, marca, modelo, cor):
        """Store the make, model and color of this car."""
        self.marca, self.modelo, self.cor = marca, modelo, cor

    def ligar(self):
        """Start the car (prints a confirmation message)."""
        print("Ligando o carro!")

    def desligar(self):
        """Turn the car off (prints a confirmation message)."""
        print("Desligando o carro!")

    def exibirInformacoes(self):
        """Print make, model and color on a single space-separated line."""
        print(self.marca, self.modelo, self.cor)
carro = Carro("Chevrolet", "Tracker", "Branco")
carro.ligar()
carro.desligar()
carro.exibirInformacoes()
| 181 | -9 | 124 |
5807592c185000a0680905d226dc7db080e9f91f | 376 | py | Python | plot_meteo_data.py | Mintaka/mypyladies_sqlite | 4f0762864f0cacd3de5234e90ccf517ee9760dbe | [
"MIT"
] | 1 | 2021-03-10T08:21:15.000Z | 2021-03-10T08:21:15.000Z | plot_meteo_data.py | Mintaka/mypyladies_sqlite | 4f0762864f0cacd3de5234e90ccf517ee9760dbe | [
"MIT"
] | 17 | 2021-02-28T20:48:37.000Z | 2021-03-31T06:06:01.000Z | plot_meteo_data.py | Mintaka/mypyladies_sqlite | 4f0762864f0cacd3de5234e90ccf517ee9760dbe | [
"MIT"
] | 4 | 2021-02-23T17:01:55.000Z | 2021-03-30T16:27:27.000Z | from pathlib import Path
import sqlite3
from matplotlib import pyplot as plt
from process_meteo_data import setup_db_connection, EXPORTS_FOLDER
connection, cursor = setup_db_connection()
data = cursor.execute(f'SELECT * FROM temperatures LIMIT 100;').fetchall()
_, date, temp = list(zip(*data))
plt.plot(temp)
plt.savefig(EXPORTS_FOLDER + '/' + "temp.png")
plt.show()
| 19.789474 | 74 | 0.760638 | from pathlib import Path
import sqlite3
from matplotlib import pyplot as plt
from process_meteo_data import setup_db_connection, EXPORTS_FOLDER
connection, cursor = setup_db_connection()
data = cursor.execute(f'SELECT * FROM temperatures LIMIT 100;').fetchall()
_, date, temp = list(zip(*data))
plt.plot(temp)
plt.savefig(EXPORTS_FOLDER + '/' + "temp.png")
plt.show()
| 0 | 0 | 0 |
a71b247387d6cb0eb3a8a7808dbf88dbf790d3ab | 281 | py | Python | tests/correctness/targets/java/Javac_source_target/Input/source_target.xpybuild.py | xpybuild/xpybuild | c71a73e47414871c8192381d0356ab62f5a58127 | [
"Apache-2.0"
] | 9 | 2017-02-06T16:45:46.000Z | 2021-12-05T09:42:58.000Z | tests/correctness/targets/java/Javac_source_target/Input/source_target.xpybuild.py | xpybuild/xpybuild | c71a73e47414871c8192381d0356ab62f5a58127 | [
"Apache-2.0"
] | 15 | 2019-01-11T19:39:34.000Z | 2022-01-08T11:11:35.000Z | tests/correctness/targets/java/Javac_source_target/Input/source_target.xpybuild.py | xpybuild/xpybuild | c71a73e47414871c8192381d0356ab62f5a58127 | [
"Apache-2.0"
] | 5 | 2017-02-06T16:51:17.000Z | 2020-12-02T17:36:30.000Z | from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.targets.java import *
import logging
Javac('${OUTPUT_DIR}/test-source-target/', ['./Simple.java'], []).option('javac.source', '7').option('javac.target', '8')
| 31.222222 | 121 | 0.736655 | from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.targets.java import *
import logging
Javac('${OUTPUT_DIR}/test-source-target/', ['./Simple.java'], []).option('javac.source', '7').option('javac.target', '8')
| 0 | 0 | 0 |
9c22dd938df65e858a051fdc3c845ce9406f89e1 | 1,034 | py | Python | tests/gopkg/test_translate.py | fabric8-analytics/fabric8-analytics-github-events-monitor | bda33c3521d36677a7c70056621e18ff24e525c3 | [
"Apache-2.0"
] | null | null | null | tests/gopkg/test_translate.py | fabric8-analytics/fabric8-analytics-github-events-monitor | bda33c3521d36677a7c70056621e18ff24e525c3 | [
"Apache-2.0"
] | 43 | 2018-12-10T10:53:05.000Z | 2021-12-13T19:53:58.000Z | tests/gopkg/test_translate.py | fabric8-analytics/fabric8-analytics-github-events-monitor | bda33c3521d36677a7c70056621e18ff24e525c3 | [
"Apache-2.0"
] | 5 | 2018-12-07T11:20:55.000Z | 2022-01-28T23:31:25.000Z | """Test the gopkg module."""
from ghmonitor.gopkg.translate import get_repo_from_random_urn, translate, GITHUB_REPO_RE
def test_get_repo_urn():
    """Test the function. Be careful though, it requires an Internet access."""
    # A known Go vanity-import path that maps to a GitHub repository.
    assert 'kubernetes/metrics' == get_repo_from_random_urn('k8s.io/metrics')
    # A domain that does not map to any GitHub repository yields None.
    assert get_repo_from_random_urn('seznam.cz') is None
def test_github_re():
    """Test the regular expression."""
    # Non-GitHub hosts must not match at all.
    assert GITHUB_REPO_RE.match('bitbucket.com/user/repo') is None
    m = GITHUB_REPO_RE.match('github.com/user/project')
    assert m.group('user') == 'user'
    assert m.group('repo') == 'project'
    # Trailing path components are ignored; only user and repo are captured.
    m = GITHUB_REPO_RE.match('github.com/user/project/folder')
    assert m.group('user') == 'user'
    assert m.group('repo') == 'project'
def test_translate():
    """Test the translation. Again, needs Internet access."""
    # Both a vanity-import path and a plain GitHub path resolve to "user/repo".
    assert 'kubernetes/metrics' == translate('k8s.io/metrics')
    assert 'user/project' == translate('github.com/user/project')
    # Hosts that cannot be mapped to GitHub yield None.
    assert translate('launchpad.net/project') is None
| 36.928571 | 89 | 0.705996 | """Test the gopkg module."""
from ghmonitor.gopkg.translate import get_repo_from_random_urn, translate, GITHUB_REPO_RE
def test_get_repo_urn():
"""Test the function. Be careful though, it requires an Internet access."""
assert 'kubernetes/metrics' == get_repo_from_random_urn('k8s.io/metrics')
assert get_repo_from_random_urn('seznam.cz') is None
def test_github_re():
"""Test the regular expression."""
assert GITHUB_REPO_RE.match('bitbucket.com/user/repo') is None
m = GITHUB_REPO_RE.match('github.com/user/project')
assert m.group('user') == 'user'
assert m.group('repo') == 'project'
m = GITHUB_REPO_RE.match('github.com/user/project/folder')
assert m.group('user') == 'user'
assert m.group('repo') == 'project'
def test_translate():
"""Test the translation. Again, needs Internet access."""
assert 'kubernetes/metrics' == translate('k8s.io/metrics')
assert 'user/project' == translate('github.com/user/project')
assert translate('launchpad.net/project') is None
| 0 | 0 | 0 |
f5a42791d7219800d12d437fb87fac92788c7a05 | 731 | py | Python | formatter.py | RichPayne/Jenny | d7607bee71cdfafdf446713a58486433b6747eeb | [
"MIT"
] | 18 | 2017-01-11T22:27:30.000Z | 2021-06-21T11:22:31.000Z | formatter.py | RichPayne/Jenny | d7607bee71cdfafdf446713a58486433b6747eeb | [
"MIT"
] | null | null | null | formatter.py | RichPayne/Jenny | d7607bee71cdfafdf446713a58486433b6747eeb | [
"MIT"
] | 6 | 2017-01-10T22:33:12.000Z | 2021-08-21T19:02:24.000Z | import mailer as m
import consoleLogger as cl
import fileLogger as fl
'''
Created by: Richard Payne
Created on: 05/08/17
Desc: Takes string from connection and extracts username, password
and IP address of the attempted login.
'''
| 23.580645 | 73 | 0.538988 | import mailer as m
import consoleLogger as cl
import fileLogger as fl
'''
Created by: Richard Payne
Created on: 05/08/17
Desc: Takes string from connection and extracts username, password
and IP address of the attempted login.
'''
def format(ip, usr, pw):
    """Report a captured login attempt to the mail, console, and file loggers.

    ``usr`` and ``pw`` are raw strings from the connection; the first
    whitespace-separated token of each is taken as the username/password.
    Nothing is reported when no username token is present (the original
    code silently ignored that case, which is preserved here).

    Improvements over the original: removed the no-op ``ip = ip``
    self-assignment, collapsed the two dead ``pass`` branches into a
    single early return, and de-duplicated the three sink calls.

    NOTE(review): this function shadows the built-in ``format``; the name
    is kept for compatibility with existing callers.
    """
    usr = usr.rsplit()
    pw = pw.rsplit()
    if not usr:
        # No username captured -- nothing worth reporting.
        return
    user = str(usr[0])
    # Substitute a placeholder when no password token was supplied.
    password = "No password provided." if not pw else str(pw[0])
    m.send(ip, user, password)
    cl.log(ip, user, password)
    fl.log(user, password)
| 444 | 0 | 25 |
47d60851cc18b7fd14824a9784874a0d7c89358a | 602 | py | Python | shop/urls.py | knkemree/django_ecommerce_website | 19876976bc872cf4835778d12d82756c573cf3b9 | [
"bzip2-1.0.6"
] | null | null | null | shop/urls.py | knkemree/django_ecommerce_website | 19876976bc872cf4835778d12d82756c573cf3b9 | [
"bzip2-1.0.6"
] | 8 | 2020-06-06T01:22:59.000Z | 2022-01-13T02:14:24.000Z | shop/urls.py | knkemree/django_ecommerce_website | 19876976bc872cf4835778d12d82756c573cf3b9 | [
"bzip2-1.0.6"
] | null | null | null | from django.urls import path
from . import views
from .views import (
SearchsProductView
)
app_name = 'shop'
urlpatterns = [
#path('', views.index, name='index'),
path('', views.product_list, name='product_list'),
path('shop/', views.product_list, name='product_list'),
path('addtocartform/', views.addtocartform, name='addtocartform'),
path('search/', SearchsProductView.as_view(), name='query'),
path('<slug:category_slug>/', views.product_list, name='product_list_by_category'),
path('<int:id>/<slug:slug>/', views.product_detail, name='product_detail'),
] | 27.363636 | 87 | 0.684385 | from django.urls import path
from . import views
from .views import (
SearchsProductView
)
app_name = 'shop'
urlpatterns = [
#path('', views.index, name='index'),
path('', views.product_list, name='product_list'),
path('shop/', views.product_list, name='product_list'),
path('addtocartform/', views.addtocartform, name='addtocartform'),
path('search/', SearchsProductView.as_view(), name='query'),
path('<slug:category_slug>/', views.product_list, name='product_list_by_category'),
path('<int:id>/<slug:slug>/', views.product_detail, name='product_detail'),
] | 0 | 0 | 0 |
47543c1ac833e87d3d5612f874d23ecb800b05cb | 2,165 | py | Python | rq1.py | ai-se/TimeLIME | eaf8cd44715bb1f6dcac29f4c0bfb2c93809ac98 | [
"MIT"
] | null | null | null | rq1.py | ai-se/TimeLIME | eaf8cd44715bb1f6dcac29f4c0bfb2c93809ac98 | [
"MIT"
] | null | null | null | rq1.py | ai-se/TimeLIME | eaf8cd44715bb1f6dcac29f4c0bfb2c93809ac98 | [
"MIT"
] | 1 | 2021-04-28T17:21:30.000Z | 2021-04-28T17:21:30.000Z | from planner import *
from othertools import *
import matplotlib.pyplot as plt
if __name__ == "__main__":
main()
| 42.45098 | 114 | 0.62448 | from planner import *
from othertools import *
import matplotlib.pyplot as plt
def main():
    """Plot RQ1 results: per-project summary of each planner vs. TimeLIME.

    Reads one CSV of per-project score lists per planner (via the
    project's ``readfile`` helper), reduces each project's scores to a
    single summary value, and saves the comparison figure via
    ``plt.savefig("rq1")``.

    NOTE(review): ``np`` is not imported in this file directly --
    presumably it comes from the star imports at the top; confirm.
    """
    score_2t = readfile('rq1_TimeLIME.csv')
    score_2f = readfile('rq1_LIME.csv')
    scores2_x = readfile('rq1_XTREE.csv')
    scores2_alve = readfile('rq1_Alves.csv')
    scores2_shat = readfile('rq1_Shat.csv')
    scores2_oliv = readfile('rq1_Oliv.csv')
    score2_rw = readfile('rq1_Random.csv')
    plt.subplots(figsize=(7, 7))
    plt.rcParams.update({'font.size': 16})
    # ind=np.arange(10)
    N = len(scores2_x)  # number of projects plotted along the x axis
    width = 0.25  # NOTE(review): unused -- leftover from an earlier bar chart?
    dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, dummy7 = [], [], [], [], [], [], []
    # One summary point per project: (1 - mean score), rounded to 3 places,
    # then scaled by 20.
    for i in range(0, len(scores2_x)):
        dummy1.append(np.round(1 - np.mean(score_2t[i]), 3) * 20)
        dummy2.append(np.round(1 - np.mean(score_2f[i]), 3) * 20)
        dummy3.append(np.round(1 - np.mean(scores2_x[i]), 3) * 20)
        dummy4.append(np.round(1 - np.mean(scores2_alve[i]), 3) * 20)
        dummy5.append(np.round(1 - np.mean(scores2_shat[i]), 3) * 20)
        dummy6.append(np.round(1 - np.mean(scores2_oliv[i]), 3) * 20)
        dummy7.append(np.round(1 - np.mean(score2_rw[i]), 3) * 20)
    # One scatter series per baseline planner; TimeLIME is drawn as a line below.
    plt.scatter(np.arange(N), dummy2, label='Classical LIME', s=100, marker='o')
    plt.scatter(np.arange(N), dummy3, label='XTREE', s=100, marker='o')
    plt.scatter(np.arange(N), dummy4, label='Alves', s=100, marker='o')
    plt.scatter(np.arange(N), dummy5, label='Shatnawi', s=100, marker='o')
    plt.scatter(np.arange(N), dummy6, label='Oliveira', s=100, marker='o')
    plt.scatter(np.arange(N), dummy7, label='RandomWalk', s=100, marker='v')
    plt.plot(np.arange(N), dummy1, label='TimeLIME', marker='^', markersize=10, color='#22406D')
    # plt.ylim(-11,130)
    plt.xticks(np.arange(N), ['jedit', 'camel1', 'camel2', 'log4j', 'xalan', 'ant', 'velocity', 'poi', 'synapse'])
    plt.yticks([0, 2, 4, 6, 8, 10, 12])
    plt.subplots_adjust(bottom=0.2, left=0, right=1.1)
    plt.grid(axis='y')
    # Legend below the axes so it does not cover the data points.
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), fancybox=True, shadow=True, ncol=3)
    plt.savefig("rq1", dpi=200, bbox_inches='tight')
    plt.show()
    return
if __name__ == "__main__":
main()
| 2,022 | 0 | 23 |
c0a16e3749506bef84fa901fa7d4b90d64b11cac | 129 | py | Python | livechat/customer/__init__.py | livechat/lc-sdk-python | 536483590b9600ef1bc86fe36a1f810368b85a9d | [
"Apache-2.0"
] | 5 | 2021-03-22T19:22:05.000Z | 2022-01-18T13:38:22.000Z | livechat/customer/__init__.py | livechat/lc-sdk-python | 536483590b9600ef1bc86fe36a1f810368b85a9d | [
"Apache-2.0"
] | 5 | 2021-10-05T14:44:33.000Z | 2022-02-16T07:33:51.000Z | livechat/customer/__init__.py | livechat/lc-sdk-python | 536483590b9600ef1bc86fe36a1f810368b85a9d | [
"Apache-2.0"
] | null | null | null | #pylint: disable=C0114
from livechat.customer.rtm.client import CustomerRTM
from livechat.customer.web.client import CustomerWeb
| 32.25 | 52 | 0.852713 | #pylint: disable=C0114
from livechat.customer.rtm.client import CustomerRTM
from livechat.customer.web.client import CustomerWeb
| 0 | 0 | 0 |
8532b6e0caf9273719815c7bb094f49cd9b9c2d3 | 1,821 | py | Python | master-branches/add_master_branch.py | FrNecas/source-git-onboarding | 550a39045af2ddb9517e2c323a2697a40dd27f6f | [
"MIT"
] | null | null | null | master-branches/add_master_branch.py | FrNecas/source-git-onboarding | 550a39045af2ddb9517e2c323a2697a40dd27f6f | [
"MIT"
] | 4 | 2020-10-26T16:06:34.000Z | 2022-03-30T13:32:28.000Z | master-branches/add_master_branch.py | FrNecas/source-git-onboarding | 550a39045af2ddb9517e2c323a2697a40dd27f6f | [
"MIT"
] | 6 | 2020-10-20T15:14:07.000Z | 2022-03-16T09:56:31.000Z | import os
import shutil
from logging import getLogger
from pathlib import Path
from shutil import copyfile
import git
import requests
from ogr.services.pagure import PagureService
logger = getLogger(__name__)
work_dir = Path("/tmp/playground")
readme_path = Path(__file__).parent / "README.md"
service = PagureService(
token=os.getenv("PAGURE_TOKEN"), instance_url="https://git.stg.centos.org/"
)
if __name__ == "__main__":
if not work_dir.is_dir():
logger.warning("Your work_dir is missing.")
page = "https://git.stg.centos.org/api/0/projects?namespace=source-git&short=true"
i = 0
while True:
logger.info(page)
r = requests.get(page)
for p in r.json()["projects"]:
AddMasterBranch(p["name"]).run()
page = r.json()["pagination"]["next"]
if not page:
break
i = i + 1
| 29.370968 | 86 | 0.637013 | import os
import shutil
from logging import getLogger
from pathlib import Path
from shutil import copyfile
import git
import requests
from ogr.services.pagure import PagureService
logger = getLogger(__name__)
work_dir = Path("/tmp/playground")
readme_path = Path(__file__).parent / "README.md"
service = PagureService(
token=os.getenv("PAGURE_TOKEN"), instance_url="https://git.stg.centos.org/"
)
class AddMasterBranch:
    """Ensure one source-git package repository has a ``master`` branch.

    When the branch is missing on the remote, it is created by cloning the
    repository, committing the bundled README, and pushing the result.
    """

    def __init__(self, pkg_name):
        self.pkg_name = pkg_name
        self.project = service.get_project(namespace="source-git", repo=pkg_name)
        self.pkg_dir = work_dir / pkg_name

    def run(self):
        """Check the remote branches and create ``master`` only when absent."""
        logger.info(f"Processing package: {self.pkg_name}")
        if "master" not in self.project.get_branches():
            logger.info("\tCreating master branch")
            self.add_master()
        else:
            logger.info("\tBranch already exists")

    def add_master(self):
        """Clone if needed, commit the README, push ``master``, then clean up."""
        if not self.pkg_dir.is_dir():
            git.Git(work_dir).clone(self.project.get_git_urls()["ssh"])
        repo = git.Repo(self.pkg_dir)
        readme_dest = self.pkg_dir / "README.md"
        copyfile(readme_path, readme_dest)
        repo.index.add([str(readme_dest)])
        repo.index.commit("Initialize master branch")
        repo.git.push("origin", "master")
        # Drop the local checkout once the branch is on the remote.
        shutil.rmtree(self.pkg_dir)
if __name__ == "__main__":
    if not work_dir.is_dir():
        logger.warning("Your work_dir is missing.")
    # Walk the paginated project listing of the source-git namespace and
    # process every package it contains.
    page = "https://git.stg.centos.org/api/0/projects?namespace=source-git&short=true"
    i = 0  # NOTE(review): page counter is incremented but never read
    while True:
        logger.info(page)
        r = requests.get(page)
        for p in r.json()["projects"]:
            AddMasterBranch(p["name"]).run()
        # The API response carries the next page URL; it is falsy on the
        # last page, which terminates the loop.
        page = r.json()["pagination"]["next"]
        if not page:
            break
        i = i + 1
| 843 | 1 | 103 |
442feb0e4bfea18cece7737dc7439b2f3bbaf507 | 1,468 | py | Python | lintcode/86.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:49.000Z | 2021-01-08T06:57:49.000Z | lintcode/86.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | null | null | null | lintcode/86.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:52.000Z | 2021-01-08T06:57:52.000Z | """
86. Binary Search Tree Iterator
https://www.lintcode.com/problem/binary-search-tree-iterator/description
the difference between this method and the standard method is:
if one has right subtree, it no longer keep itself in the stack.
so when taken out of the stack, it does not need to make extra step to remove itself.
the downside of this simplified method is it is not appliable to find
kth closest member in a binary tree (maybe?)
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
Example of iterate a tree:
iterator = BSTIterator(root)
while iterator.hasNext():
node = iterator.next()
do something for node
"""
class BSTIterator:
"""
@param: root: The root of binary tree.
"""
"""
@return: True if there has next node, or false
"""
"""
@return: return next node
"""
| 23.677419 | 85 | 0.615123 | """
86. Binary Search Tree Iterator
https://www.lintcode.com/problem/binary-search-tree-iterator/description
the difference between this method and the standard method is:
if one has right subtree, it no longer keep itself in the stack.
so when taken out of the stack, it does not need to make extra step to remove itself.
the downside of this simplified method is it is not appliable to find
kth closest member in a binary tree (maybe?)
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
Example of iterate a tree:
iterator = BSTIterator(root)
while iterator.hasNext():
node = iterator.next()
do something for node
"""
class BSTIterator:
    """In-order (ascending) iterator over a binary search tree.

    Maintains an explicit stack whose top is always the next node to
    emit; each node's left spine is pushed eagerly, so the stack head is
    the minimum of the remaining, unvisited portion of the tree.

    @param: root: The root of binary tree.
    """

    def __init__(self, root):
        # do intialization if necessary
        self.root = root
        self.stack = []
        # Seed the stack with the leftmost path from the root.
        node = root
        while node:
            self.stack.append(node)
            node = node.left

    def hasNext(self):
        """@return: True if there has next node, or false"""
        return bool(self.stack)

    def next(self):
        """@return: return next node"""
        if not self.stack:
            # Iteration exhausted.
            return None
        current = self.stack.pop()
        # The in-order successor lives at the leftmost node of the
        # popped node's right subtree; push that path for later visits.
        runner = current.right
        while runner:
            self.stack.append(runner)
            runner = runner.left
        return current
| 472 | 0 | 78 |
6ee76beff0708fcda0432ab4f6d907a3ceecdcc1 | 153 | py | Python | config/settings/deploy_example_2.py | hyphae/apis-service_center | 0a6f0d349a435a3c739c2ee1ebdcc010c2dd13d1 | [
"Apache-2.0"
] | null | null | null | config/settings/deploy_example_2.py | hyphae/apis-service_center | 0a6f0d349a435a3c739c2ee1ebdcc010c2dd13d1 | [
"Apache-2.0"
] | 3 | 2021-03-20T10:35:08.000Z | 2022-03-21T16:54:05.000Z | config/settings/deploy_example_2.py | hyphae/apis-service_center | 0a6f0d349a435a3c739c2ee1ebdcc010c2dd13d1 | [
"Apache-2.0"
] | null | null | null | """切り替え用 settings ファイル
base.py をベースに上書きする.
"""
from .base import *
DEBUG = False
ALLOWED_HOSTS = ['*']
STATIC_URL = '/apis-service-center/app/static/'
| 15.3 | 47 | 0.69281 | """切り替え用 settings ファイル
base.py をベースに上書きする.
"""
from .base import *
DEBUG = False
ALLOWED_HOSTS = ['*']
STATIC_URL = '/apis-service-center/app/static/'
| 0 | 0 | 0 |