hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a6f3a549a3f8a41861c7ee807040649078ca252 | 3,461 | py | Python | ss_baselines/saven/ddppo/algo/ddppo.py | gtatiya/sound-spaces | 52792865b2a491883bdbfebbc015acaf980a8d3a | [
"CC-BY-4.0"
] | 1 | 2021-09-28T16:04:09.000Z | 2021-09-28T16:04:09.000Z | ss_baselines/saven/ddppo/algo/ddppo.py | gtatiya/sound-spaces | 52792865b2a491883bdbfebbc015acaf980a8d3a | [
"CC-BY-4.0"
] | null | null | null | ss_baselines/saven/ddppo/algo/ddppo.py | gtatiya/sound-spaces | 52792865b2a491883bdbfebbc015acaf980a8d3a | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.distributed as distrib
from ss_baselines.saven.models.rollout_storage import RolloutStorage
from ss_baselines.saven.ppo.ppo import PPO
EPS_PPO = 1e-5
def distributed_mean_and_var(
    values: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Computes the mean and variances of a tensor over multiple workers.
    This method is equivalent to first collecting all versions of values and
    then computing the mean and variance locally over that
    :param values: (*,) shaped tensors to compute mean and variance over.  Assumed
                   to be solely the workers local copy of this tensor,
                   the resultant mean and variance will be computed
                   over _all_ workers version of this tensor.
    """
    # Requires torch.distributed to have been initialized by the caller.
    assert distrib.is_initialized(), "Distributed must be initialized"
    world_size = distrib.get_world_size()
    # Sum the per-worker means, then divide: the mean of means.
    # NOTE(review): this equals the global mean only when every worker holds
    # the same number of elements — appears assumed here; confirm upstream.
    mean = values.mean()
    distrib.all_reduce(mean)
    mean /= world_size
    # Average of each worker's mean squared deviation from the global mean;
    # same equal-shard assumption applies to the resulting variance.
    sq_diff = (values - mean).pow(2).mean()
    distrib.all_reduce(sq_diff)
    var = sq_diff / world_size
    return mean, var
| 34.959596 | 97 | 0.649812 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Tuple
import torch
import torch.distributed as distrib
from ss_baselines.saven.models.rollout_storage import RolloutStorage
from ss_baselines.saven.ppo.ppo import PPO
EPS_PPO = 1e-5
def distributed_mean_and_var(
    values: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""Compute the mean and variance of ``values`` across every worker.

    Equivalent to gathering all workers' copies of ``values`` and computing
    the statistics over the concatenation (each worker is assumed to hold an
    equally sized local shard).

    :param values: (*,) shaped tensor; this worker's local copy. The returned
        mean and variance are reduced over all workers' copies.
    """
    assert distrib.is_initialized(), "Distributed must be initialized"
    num_workers = distrib.get_world_size()

    # Mean of the per-worker means == global mean for equal-sized shards.
    global_mean = values.mean()
    distrib.all_reduce(global_mean)
    global_mean = global_mean / num_workers

    # Average squared deviation from the global mean, reduced the same way.
    local_sq_dev = (values - global_mean).pow(2).mean()
    distrib.all_reduce(local_sq_dev)
    global_var = local_sq_dev / num_workers

    return global_mean, global_var
class DecentralizedDistributedMixin:
    """Mixin adding decentralized distributed (DD-PPO style) gradient
    reduction to a PPO-like trainer.

    Expects the host class to provide ``actor_critic``, ``device``,
    ``use_normalized_advantage`` and a ``before_backward`` method.
    """

    def _get_advantages_distributed(
        self, rollouts: RolloutStorage
    ) -> torch.Tensor:
        # Advantage = return - value baseline, per step (last step excluded).
        advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
        if not self.use_normalized_advantage:
            return advantages
        # Normalize with statistics reduced across ALL workers so every
        # worker whitens its advantages identically.
        mean, var = distributed_mean_and_var(advantages)
        return (advantages - mean) / (var.sqrt() + EPS_PPO)

    def init_distributed(self, find_unused_params: bool = True) -> None:
        r"""Initializes distributed training for the model
        1. Broadcasts the model weights from world_rank 0 to all other workers
        2. Adds gradient hooks to the model
        :param find_unused_params: Whether or not to filter out unused parameters
                                   before gradient reduction.  This *must* be True if
                                   there are any parameters in the model that where unused in the
                                   forward pass, otherwise the gradient reduction
                                   will not work correctly.
        """
        # NB: Used to hide the hooks from the nn.Module,
        # so they don't show up in the state_dict
        class Guard:
            def __init__(self, model, device):
                if torch.cuda.is_available():
                    self.ddp = torch.nn.parallel.DistributedDataParallel(
                        model, device_ids=[device], output_device=device
                    )
                else:
                    self.ddp = torch.nn.parallel.DistributedDataParallel(model)

        self._ddp_hooks = Guard(self.actor_critic, self.device)
        # Swap the advantage computation for the distributed variant.
        self.get_advantages = self._get_advantages_distributed
        # Keep a handle on DDP's reducer so before_backward can prime it.
        self.reducer = self._ddp_hooks.ddp.reducer
        self.find_unused_params = find_unused_params

    def before_backward(self, loss):
        super().before_backward(loss)
        # Tell the reducer which outputs participate in this backward pass;
        # an empty list means "all parameters produced gradients".
        if self.find_unused_params:
            self.reducer.prepare_for_backward([loss])
        else:
            self.reducer.prepare_for_backward([])
class DDPPO(DecentralizedDistributedMixin, PPO):
    """PPO trainer with decentralized distributed gradient reduction.

    The mixin must precede ``PPO`` in the MRO so its ``before_backward``
    and distributed advantage computation override the base class.
    """
    pass
| 873 | 1,177 | 46 |
2261ff7ea8e67e5456531bd1d61559ba5cca2948 | 2,166 | py | Python | generated-libraries/python/netapp/ntdtest/group2_view_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | 2 | 2017-03-28T15:31:26.000Z | 2018-08-16T22:15:18.000Z | generated-libraries/python/netapp/ntdtest/group2_view_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | generated-libraries/python/netapp/ntdtest/group2_view_info.py | radekg/netapp-ontap-lib-get | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | [
"MIT"
] | null | null | null | from netapp.netapp_object import NetAppObject
class Group2ViewInfo(NetAppObject):
"""
2nd nested typedef at level 1
"""
_field_5 = None
@property
def field_5(self):
"""
Generic/Dummy Field 5
Attributes: required-for-create, modifiable
"""
return self._field_5
@field_5.setter
_field_6 = None
@property
def field_6(self):
"""
Generic/Dummy Field 6
Attributes: required-for-create, modifiable
"""
return self._field_6
@field_6.setter
_field_7 = None
@property
def field_7(self):
"""
Generic/Dummy Field 7
Attributes: non-creatable, non-modifiable
"""
return self._field_7
@field_7.setter
_field_8 = None
@property
def field_8(self):
"""
Generic/Dummy Field 8
Attributes: non-creatable, non-modifiable
"""
return self._field_8
@field_8.setter
@staticmethod
@staticmethod
| 25.785714 | 89 | 0.537858 | from netapp.netapp_object import NetAppObject
class Group2ViewInfo(NetAppObject):
    """
    2nd nested typedef at level 1

    Machine-generated binding for the NetApp ONTAP 'group2-view-info'
    API object: four string fields exposed as validated properties.
    NOTE(review): ``basestring`` in describe_properties is Python 2 only —
    calling it under Python 3 raises NameError; confirm target runtime.
    """
    # Backing store for the 'field-5' API attribute.
    _field_5 = None
    @property
    def field_5(self):
        """
        Generic/Dummy Field 5
        Attributes: required-for-create, modifiable
        """
        return self._field_5
    @field_5.setter
    def field_5(self, val):
        # validate() is inherited from NetAppObject — presumably checks the
        # value against the API schema before storing; verify in base class.
        if val != None:
            self.validate('field_5', val)
        self._field_5 = val
    # Backing store for the 'field-6' API attribute.
    _field_6 = None
    @property
    def field_6(self):
        """
        Generic/Dummy Field 6
        Attributes: required-for-create, modifiable
        """
        return self._field_6
    @field_6.setter
    def field_6(self, val):
        if val != None:
            self.validate('field_6', val)
        self._field_6 = val
    # Backing store for the 'field-7' API attribute.
    _field_7 = None
    @property
    def field_7(self):
        """
        Generic/Dummy Field 7
        Attributes: non-creatable, non-modifiable
        """
        return self._field_7
    @field_7.setter
    def field_7(self, val):
        if val != None:
            self.validate('field_7', val)
        self._field_7 = val
    # Backing store for the 'field-8' API attribute.
    _field_8 = None
    @property
    def field_8(self):
        """
        Generic/Dummy Field 8
        Attributes: non-creatable, non-modifiable
        """
        return self._field_8
    @field_8.setter
    def field_8(self, val):
        if val != None:
            self.validate('field_8', val)
        self._field_8 = val
    @staticmethod
    def get_api_name():
        # Wire name of this typedef in the ONTAP API.
        return "group2-view-info"
    @staticmethod
    def get_desired_attrs():
        # Attribute names (API spelling, dash-separated) fetched by default.
        return [
            'field-5',
            'field-6',
            'field-7',
            'field-8',
        ]
    def describe_properties(self):
        # Schema used by the serialization layer: Python name -> type info.
        return {
            'field_5': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'field_6': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'field_7': { 'class': basestring, 'is_list': False, 'required': 'optional' },
            'field_8': { 'class': basestring, 'is_list': False, 'required': 'optional' },
        }
| 936 | 0 | 187 |
047464dd0842beeeadd509639a066e71adabcead | 1,427 | py | Python | chapter09/chain.py | JoeanAmiee/Mastering-Python-Design-Patterns-Second-Edition | 89c55dcf5e1e0e730dde593b487050f360371932 | [
"MIT"
] | 278 | 2018-08-16T12:59:24.000Z | 2022-03-21T08:21:11.000Z | chapter09/chain.py | 50611/Mastering-Python-Design-Patterns-Second-Edition | 6efc4a935f15d2aa6c840131f72fb8c53a493a93 | [
"MIT"
] | 4 | 2019-05-16T11:44:45.000Z | 2022-02-04T07:24:47.000Z | chapter09/chain.py | 50611/Mastering-Python-Design-Patterns-Second-Edition | 6efc4a935f15d2aa6c840131f72fb8c53a493a93 | [
"MIT"
] | 166 | 2018-08-13T21:47:16.000Z | 2022-03-18T12:20:31.000Z |
if __name__ == '__main__':
main()
| 26.924528 | 55 | 0.553609 | class Event:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
class Widget:
    """Base node in a chain-of-responsibility event hierarchy.

    An event is dispatched to ``handle_<event>`` on this widget if such an
    attribute exists, otherwise forwarded to the parent widget, otherwise
    given to ``handle_default`` when present; unhandled events are dropped.
    """

    def __init__(self, parent=None):
        # Next widget consulted when this one lacks a matching handler.
        self.parent = parent

    def handle(self, event):
        """Route *event* along the chain (see class docstring)."""
        slot_name = f'handle_{event}'
        if hasattr(self, slot_name):
            getattr(self, slot_name)(event)
        elif self.parent is not None:
            self.parent.handle(event)
        elif hasattr(self, 'handle_default'):
            self.handle_default(event)
class MainWindow(Widget):
    """Root of the widget chain; consumes 'close' plus anything unhandled."""

    def handle_close(self, event):
        # Terminal handler for the close event.
        print('MainWindow: {}'.format(event))

    def handle_default(self, event):
        # Catch-all reached when no widget in the chain claimed the event.
        print('MainWindow Default: {}'.format(event))
class SendDialog(Widget):
    """Mid-chain widget that only knows how to handle paint events."""

    def handle_paint(self, event):
        print('SendDialog: {}'.format(event))
class MsgText(Widget):
    """Leaf widget that only knows how to handle down (scroll) events."""

    def handle_down(self, event):
        print('MsgText: {}'.format(event))
def main():
    """Demo: build a three-level widget chain and fire sample events at it."""
    # Chain: MsgText -> SendDialog -> MainWindow (MainWindow is the root).
    mw = MainWindow()
    sd = SendDialog(mw)
    msg = MsgText(sd)
    for e in ('down', 'paint', 'unhandled', 'close'):
        evt = Event(e)
        # Send each event to every level to show how it climbs the chain.
        print(f'Sending event -{evt}- to MainWindow')
        mw.handle(evt)
        print(f'Sending event -{evt}- to SendDialog')
        sd.handle(evt)
        print(f'Sending event -{evt}- to MsgText')
        msg.handle(evt)
if __name__ == '__main__':
    main()
| 1,008 | -8 | 384 |
dc532744bff28dc0b15b2faea563fe4cb6ff5ae6 | 5,260 | py | Python | venv/Lib/site-packages/PySide2/examples/sql/books/ui_bookwindow.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 41 | 2021-06-19T13:57:18.000Z | 2021-12-02T17:08:53.000Z | venv/Lib/site-packages/PySide2/examples/sql/books/ui_bookwindow.py | Farhan-Malik/advance-hand-gesture | 0ebe21ddd7c8c2eb14746678be57b33d38c47205 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | venvWIN/Lib/site-packages/PySide2/examples/sql/books/ui_bookwindow.py | NeroNekro/PortableController | a8bbfc1b6c8cb2c919e48eb0104e42f436059b18 | [
"BSD-3-Clause"
] | 4 | 2021-07-02T03:09:51.000Z | 2021-11-25T13:00:10.000Z | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'bookwindow.ui'
##
## Created by: Qt User Interface Compiler version 5.14.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QMetaObject, QObject, QPoint,
QRect, QSize, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QFont,
QFontDatabase, QIcon, QLinearGradient, QPalette, QPainter, QPixmap,
QRadialGradient)
from PySide2.QtWidgets import *
# setupUi
# retranslateUi
| 40.461538 | 96 | 0.68251 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'bookwindow.ui'
##
## Created by: Qt User Interface Compiler version 5.14.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QMetaObject, QObject, QPoint,
QRect, QSize, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QFont,
QFontDatabase, QIcon, QLinearGradient, QPalette, QPainter, QPixmap,
QRadialGradient)
from PySide2.QtWidgets import *
class Ui_BookWindow(object):
    """uic-generated UI layout for the Books window (do not edit by hand;
    regenerate from bookwindow.ui). setupUi builds the widget tree,
    retranslateUi installs translatable strings."""
    def setupUi(self, BookWindow):
        # NOTE(review): generated guard — usually reads `if not ...objectName()`;
        # as written it only (re)sets the name when one already exists. Confirm
        # against the uic version that produced this file.
        if BookWindow.objectName():
            BookWindow.setObjectName(u"BookWindow")
        BookWindow.resize(601, 420)
        # Central widget with a vertical layout holding one group box.
        self.centralWidget = QWidget(BookWindow)
        self.centralWidget.setObjectName(u"centralWidget")
        self.vboxLayout = QVBoxLayout(self.centralWidget)
        self.vboxLayout.setSpacing(6)
        self.vboxLayout.setObjectName(u"vboxLayout")
        self.vboxLayout.setContentsMargins(9, 9, 9, 9)
        self.groupBox = QGroupBox(self.centralWidget)
        self.groupBox.setObjectName(u"groupBox")
        self.vboxLayout1 = QVBoxLayout(self.groupBox)
        self.vboxLayout1.setSpacing(6)
        self.vboxLayout1.setObjectName(u"vboxLayout1")
        self.vboxLayout1.setContentsMargins(9, 9, 9, 9)
        # Table listing the books; whole-row selection.
        self.bookTable = QTableView(self.groupBox)
        self.bookTable.setObjectName(u"bookTable")
        self.bookTable.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.vboxLayout1.addWidget(self.bookTable)
        # 'Details' form: label/editor pairs for the selected book.
        self.groupBox_2 = QGroupBox(self.groupBox)
        self.groupBox_2.setObjectName(u"groupBox_2")
        self.formLayout = QFormLayout(self.groupBox_2)
        self.formLayout.setObjectName(u"formLayout")
        self.label_5 = QLabel(self.groupBox_2)
        self.label_5.setObjectName(u"label_5")
        self.formLayout.setWidget(0, QFormLayout.LabelRole, self.label_5)
        self.titleEdit = QLineEdit(self.groupBox_2)
        self.titleEdit.setObjectName(u"titleEdit")
        self.titleEdit.setEnabled(True)
        self.formLayout.setWidget(0, QFormLayout.FieldRole, self.titleEdit)
        self.label_2 = QLabel(self.groupBox_2)
        self.label_2.setObjectName(u"label_2")
        self.formLayout.setWidget(1, QFormLayout.LabelRole, self.label_2)
        self.authorEdit = QComboBox(self.groupBox_2)
        self.authorEdit.setObjectName(u"authorEdit")
        self.authorEdit.setEnabled(True)
        self.formLayout.setWidget(1, QFormLayout.FieldRole, self.authorEdit)
        self.label_3 = QLabel(self.groupBox_2)
        self.label_3.setObjectName(u"label_3")
        self.formLayout.setWidget(2, QFormLayout.LabelRole, self.label_3)
        self.genreEdit = QComboBox(self.groupBox_2)
        self.genreEdit.setObjectName(u"genreEdit")
        self.genreEdit.setEnabled(True)
        self.formLayout.setWidget(2, QFormLayout.FieldRole, self.genreEdit)
        self.label_4 = QLabel(self.groupBox_2)
        self.label_4.setObjectName(u"label_4")
        self.formLayout.setWidget(3, QFormLayout.LabelRole, self.label_4)
        # Year spin box: range allows BCE years up to 2100.
        self.yearEdit = QSpinBox(self.groupBox_2)
        self.yearEdit.setObjectName(u"yearEdit")
        self.yearEdit.setEnabled(True)
        self.yearEdit.setMinimum(-1000)
        self.yearEdit.setMaximum(2100)
        self.formLayout.setWidget(3, QFormLayout.FieldRole, self.yearEdit)
        self.label = QLabel(self.groupBox_2)
        self.label.setObjectName(u"label")
        self.formLayout.setWidget(4, QFormLayout.LabelRole, self.label)
        # Rating spin box: 0-5 stars.
        self.ratingEdit = QSpinBox(self.groupBox_2)
        self.ratingEdit.setObjectName(u"ratingEdit")
        self.ratingEdit.setMaximum(5)
        self.formLayout.setWidget(4, QFormLayout.FieldRole, self.ratingEdit)
        self.vboxLayout1.addWidget(self.groupBox_2)
        self.vboxLayout.addWidget(self.groupBox)
        BookWindow.setCentralWidget(self.centralWidget)
        # Keyboard tab order: table -> title -> author -> genre -> year.
        QWidget.setTabOrder(self.bookTable, self.titleEdit)
        QWidget.setTabOrder(self.titleEdit, self.authorEdit)
        QWidget.setTabOrder(self.authorEdit, self.genreEdit)
        QWidget.setTabOrder(self.genreEdit, self.yearEdit)
        self.retranslateUi(BookWindow)
        QMetaObject.connectSlotsByName(BookWindow)
    # setupUi

    def retranslateUi(self, BookWindow):
        # Install (re-)translatable UI strings via Qt's translation system.
        BookWindow.setWindowTitle(QCoreApplication.translate("BookWindow", u"Books", None))
        self.groupBox.setTitle("")
        self.groupBox_2.setTitle(QCoreApplication.translate("BookWindow", u"Details", None))
        self.label_5.setText(QCoreApplication.translate("BookWindow", u"<b>Title:</b>", None))
        self.label_2.setText(QCoreApplication.translate("BookWindow", u"<b>Author: </b>", None))
        self.label_3.setText(QCoreApplication.translate("BookWindow", u"<b>Genre:</b>", None))
        self.label_4.setText(QCoreApplication.translate("BookWindow", u"<b>Year:</b>", None))
        self.yearEdit.setPrefix("")
        self.label.setText(QCoreApplication.translate("BookWindow", u"<b>Rating:</b>", None))
    # retranslateUi
| 4,459 | 7 | 76 |
24c230a304bff436e218ccf5ebd50b2cdc35065c | 2,356 | py | Python | src/analysisService.py | openeuler-mirror/hpcrunner | 338bc16b1dceb3909a49a6a51c0946c4b1a30f77 | [
"MulanPSL-1.0"
] | null | null | null | src/analysisService.py | openeuler-mirror/hpcrunner | 338bc16b1dceb3909a49a6a51c0946c4b1a30f77 | [
"MulanPSL-1.0"
] | null | null | null | src/analysisService.py | openeuler-mirror/hpcrunner | 338bc16b1dceb3909a49a6a51c0946c4b1a30f77 | [
"MulanPSL-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from machineService import MachineService
from configService import ConfigService
from downloadService import DownloadService
from installService import InstallService
from envService import EnvService
from buildService import BuildService
from runService import RunService
from perfService import PerfService
from testService import TestService
from benchService import BenchmarkService
from containerService import ContainerService
| 25.608696 | 63 | 0.669355 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from machineService import MachineService
from configService import ConfigService
from downloadService import DownloadService
from installService import InstallService
from envService import EnvService
from buildService import BuildService
from runService import RunService
from perfService import PerfService
from testService import TestService
from benchService import BenchmarkService
from containerService import ContainerService
class AnalysisService:
    """Facade that aggregates every HPC-runner service (machine info,
    config, download, install, env, build, run, perf, benchmark,
    container, test) behind one object; each method delegates 1:1 to
    the corresponding service instance."""
    def __init__(self):
        # One instance of each sub-service, constructed eagerly.
        self.jmachine = MachineService()
        self.jconfig = ConfigService()
        self.jdownload = DownloadService()
        self.jinstall = InstallService()
        self.jenv = EnvService()
        self.jbuild = BuildService()
        self.jrun = RunService()
        self.jperf = PerfService()
        self.jbenchmark = BenchmarkService()
        self.jcontainer = ContainerService()
        self.jtest = TestService()
    # --- machine / config / test -------------------------------------
    def get_machine_info(self):
        self.jmachine.output_machine_info()
    def bench(self, bench_case):
        self.jbenchmark.output_bench_info(bench_case)
    def switch_config(self, config_file):
        self.jconfig.switch_config(config_file)
    def test(self):
        self.jtest.test()
    # --- download ----------------------------------------------------
    def download(self):
        self.jdownload.download()
    def check_network(self):
        self.jdownload.check_network()
    # --- profiling ---------------------------------------------------
    def gpu_perf(self):
        self.jperf.gpu_perf()
    def ncu_perf(self, kernel):
        self.jperf.ncu_perf(kernel)
    def perf(self):
        self.jperf.perf()
    def kperf(self):
        self.jperf.kperf()
    # --- run / build -------------------------------------------------
    def run(self):
        self.jrun.run()
    def batch_run(self):
        self.jrun.batch_run()
    def clean(self):
        self.jbuild.clean()
    def build(self):
        self.jbuild.build()
    def env(self):
        self.jenv.env()
    # --- software installation ---------------------------------------
    def install(self,software_path, compiler_mpi_info):
        self.jinstall.install(software_path, compiler_mpi_info)
    def get_install_list(self):
        self.jinstall.list()
    def remove(self,software_path):
        self.jinstall.remove(software_path)
    def find(self,content):
        self.jinstall.find(content)
    def install_deps(self):
        self.jinstall.install_depend()
    # --- containers --------------------------------------------------
    def gen_def(self, image):
        self.jcontainer.gen_def(image)
79c15f3fee537e6b2c6b3615c01b789101bd0a90 | 475 | py | Python | data/syn_project/queries/syn_project_query.py | ki-tools/sls_ki_synapse | 8c726a9ec568e3d416049a8813c21bbe87740f16 | [
"Apache-2.0"
] | 1 | 2018-11-21T19:54:34.000Z | 2018-11-21T19:54:34.000Z | data/syn_project/queries/syn_project_query.py | pcstout/sls_ki_synapse | 8c726a9ec568e3d416049a8813c21bbe87740f16 | [
"Apache-2.0"
] | 5 | 2019-03-12T16:44:35.000Z | 2019-03-15T21:46:00.000Z | data/syn_project/queries/syn_project_query.py | ki-tools/sls_ki_synapse | 8c726a9ec568e3d416049a8813c21bbe87740f16 | [
"Apache-2.0"
] | 2 | 2019-02-28T23:16:32.000Z | 2019-03-05T22:16:39.000Z | import graphene
from ..types import SynProject
from core import Synapse
class SynProjectQuery(graphene.ObjectType):
"""
Defines all the SynProject queries.
"""
syn_project = graphene.Field(
SynProject,
id=graphene.String(required=True)
)
| 22.619048 | 51 | 0.642105 | import graphene
from ..types import SynProject
from core import Synapse
class SynProjectQuery(graphene.ObjectType):
    """
    Defines all the SynProject queries.
    """
    # GraphQL field: synProject(id: String!) -> SynProject
    syn_project = graphene.Field(
        SynProject,
        id=graphene.String(required=True)
    )

    def resolve_syn_project(self, info, id):
        """Resolver: fetch the Synapse project by id, or None if absent."""
        # Synapse.client() presumably returns a cached synapseclient session;
        # .get(id) raises on auth/network errors — not caught here.
        project = Synapse.client().get(id)
        if project:
            return SynProject.from_project(project)
        else:
            return None
| 172 | 0 | 27 |
d0f9a14ec45437a9db87ebe67c95d4f188a5622d | 2,151 | py | Python | worker.py | reger-men/tensorflow_benchmark | bc51f98d38c2e54fca61b4ce628b0e6c457e04ac | [
"MIT"
] | 6 | 2019-12-31T23:32:35.000Z | 2021-05-20T06:20:19.000Z | worker.py | reger-men/tensorflow_benchmark | bc51f98d38c2e54fca61b4ce628b0e6c457e04ac | [
"MIT"
] | null | null | null | worker.py | reger-men/tensorflow_benchmark | bc51f98d38c2e54fca61b4ce628b0e6c457e04ac | [
"MIT"
] | 2 | 2019-12-18T19:28:13.000Z | 2020-05-04T09:44:46.000Z | from paramiko import SSHClient, AutoAddPolicy
from paramiko.auth_handler import AuthenticationException
from paramiko.ssh_exception import NoValidConnectionsError
class Config(object):
"""Worker access data"""
class Worker(object):
"""Worker Object to connect and execute commands from the 'chief' worker"""
def connect(self):
"""Connect to the worker"""
if self.client is None:
try:
client = SSHClient()
client.set_missing_host_key_policy(self.policy)
client.connect(hostname=self.ip,
port=self.port,
username=self.user,
password=self.pwd)
except AuthenticationException:
print("Authentication failed!")
except NoValidConnectionsError:
print("Connection failed!")
finally:
client.exec_command("hostnamectl")
return client
return self.client
def exec_cmd(self, cmd, inBackground=False, timeout=None):
"""Execute command and return status and output"""
""" status 0 means no error"""
status=0
stdout='Process run in background'
self.client = self.connect()
if inBackground:
transport = self.client.get_transport()
channel = transport.open_session()
channel.setblocking(0)
channel.exec_command(cmd)
else:
stdin, stdout, stderr = self.client.exec_command(cmd)
status = stdout.channel.recv_exit_status()
if status != 0:
stdout = stderr
return status, stdout
| 31.173913 | 80 | 0.569503 | from paramiko import SSHClient, AutoAddPolicy
from paramiko.auth_handler import AuthenticationException
from paramiko.ssh_exception import NoValidConnectionsError
class Config(object):
    """Worker access data: host, credentials and SSH port for one worker."""

    def __init__(self, ip, user, pwd, port=22):
        # Plain attribute bag; port defaults to the standard SSH port.
        self.ip = ip
        self.user = user
        self.pwd = pwd
        self.port = port
class Worker(object):
    """Worker Object to connect and execute commands from the 'chief' worker"""
    def __init__(self, config):
        # Copy connection settings from a Config; the SSH client is created
        # lazily on the first connect() call.
        self.ip = config.ip
        self.port = config.port
        self.user = config.user
        self.pwd = config.pwd
        self.client = None
        # AutoAddPolicy silently trusts unknown host keys (convenient but
        # vulnerable to MITM — acceptable only on a trusted network).
        self.policy = AutoAddPolicy()

    def connect(self):
        """Connect to the worker"""
        # Reuse an existing client; otherwise open a new SSH session.
        if self.client is None:
            try:
                client = SSHClient()
                client.set_missing_host_key_policy(self.policy)
                client.connect(hostname=self.ip,
                               port=self.port,
                               username=self.user,
                               password=self.pwd)
            except AuthenticationException:
                print("Authentication failed!")
            except NoValidConnectionsError:
                print("Connection failed!")
            finally:
                # NOTE(review): runs even after a failed connect, where
                # `client` is unauthenticated — exec_command would then raise.
                client.exec_command("hostnamectl")
                return client
        return self.client

    def exec_cmd(self, cmd, inBackground=False, timeout=None):
        """Execute command and return status and output"""
        """ status 0 means no error"""
        # NOTE(review): `timeout` is accepted but never used.
        status=0
        stdout='Process run in background'
        self.client = self.connect()
        if inBackground:
            # Fire-and-forget: run on a non-blocking channel, don't wait.
            transport = self.client.get_transport()
            channel = transport.open_session()
            channel.setblocking(0)
            channel.exec_command(cmd)
        else:
            # Blocking: wait for exit status; on failure return stderr
            # (a paramiko channel file, not a string) as the output.
            stdin, stdout, stderr = self.client.exec_command(cmd)
            status = stdout.channel.recv_exit_status()
            if status != 0:
                stdout = stderr
        return status, stdout

    def disconnect(self):
        # Close the underlying SSH session (client must exist).
        self.client.close()
| 337 | 0 | 89 |
bcd2ed3d312e4715a91b90c5cbcdb3cae15e20b0 | 752 | py | Python | python/test/util.py | iamvarol/spark-nlp | 90bacd5cc9723cc0c4be53c5ffa8891ca450ada7 | [
"Apache-2.0"
] | null | null | null | python/test/util.py | iamvarol/spark-nlp | 90bacd5cc9723cc0c4be53c5ffa8891ca450ada7 | [
"Apache-2.0"
] | null | null | null | python/test/util.py | iamvarol/spark-nlp | 90bacd5cc9723cc0c4be53c5ffa8891ca450ada7 | [
"Apache-2.0"
] | null | null | null | from pyspark.sql import SparkSession
import os
| 30.08 | 89 | 0.62367 | from pyspark.sql import SparkSession
import os
class SparkSessionForTest:
spark = SparkSession.builder \
.master("local[*]") \
.config("spark.jars", 'lib/sparknlp.jar') \
.config("spark.driver.memory", "12G") \
.config("spark.driver.maxResultSize", "2G") \
.config("spark.serializer", "org.apache.spark.serializer.KryoSerializer") \
.config("spark.kryoserializer.buffer.max", "500m") \
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
class SparkContextForTest:
spark = SparkSessionForTest.spark
data = spark. \
read \
.parquet("file:///" + os.getcwd() + "/../src/test/resources/sentiment.parquet") \
.limit(100)
data.cache()
data.count()
| 0 | 657 | 46 |
dfdaee496a68b4ad6cd43c190ebddc067bf47fca | 5,280 | py | Python | feature_mining/em_original.py | nfreundlich/CS410_CourseProject | c50d0ff04472e48b0b59abe4467dc17d7c2cfab8 | [
"MIT"
] | null | null | null | feature_mining/em_original.py | nfreundlich/CS410_CourseProject | c50d0ff04472e48b0b59abe4467dc17d7c2cfab8 | [
"MIT"
] | 18 | 2018-10-24T01:35:45.000Z | 2018-12-17T03:57:36.000Z | feature_mining/em_original.py | nfreundlich/CS410_CourseProject | c50d0ff04472e48b0b59abe4467dc17d7c2cfab8 | [
"MIT"
] | null | null | null | import numpy as np
import math
from feature_mining.em_base import ExpectationMaximization
class ExpectationMaximizationOriginal(ExpectationMaximization):
"""
Original EM Algorithm as developed by Santu.
"""
if __name__ == '__main__':
em = ExpectationMaximizationOriginal()
em.em()
| 44.745763 | 143 | 0.57197 | import numpy as np
import math
from feature_mining.em_base import ExpectationMaximization
class ExpectationMaximizationOriginal(ExpectationMaximization):
    """
    Original EM Algorithm as developed by Santu.
    """
    def __init__(self, dump_path="../tests/data/em_01/"):
        print(type(self).__name__, '- init...')
        ExpectationMaximization.__init__(self, dump_path=dump_path)
        # Snapshot of pi from the previous M-step, used by compute_cost.
        self.previous_pi = []

    def import_data(self):
        """Load reviews, topic model and background word distribution
        from .npy dumps under dump_path."""
        print(type(self).__name__, '- import data...')
        self.reviews = np.load(self.dump_path + "Reviews.npy")
        # .item() unwraps the 0-d object arrays back into dicts.
        self.topic_model = np.load(self.dump_path + 'TopicModel.npy').item()
        self.background_probability = np.load(self.dump_path + 'BackgroundProbability.npy').item()

    def initialize_parameters(self):
        """Load the initial hidden parameters and mixing weights pi."""
        print(type(self).__name__, '- initialize parameters...')
        self.hidden_parameters = np.load(self.dump_path + "HP.npy")
        self.hidden_parameters_background = np.load(self.dump_path + "HPB.npy")
        self.pi = np.load(self.dump_path + "PI.npy")

    def e_step(self):
        print(type(self).__name__, '- e_step...')
        """
        E-Step of EM algo, as implemented by ***Santu***.
        Compute HP and BHP.
        Input:
            reviews
            topic_model
            pi
            background_probability
            lambda_background
            hidden_parameters
            hidden_parameters_background
        Output:
            updated hidden_parameters
            updated hidden_parameters_background
        """
        # For every word occurrence, compute its posterior over aspects
        # (hidden_parameters) and the posterior that it came from the
        # background distribution (hidden_parameters_background).
        for reviewNum in range(0, len(self.reviews)):
            for lineNum in range(0, len(self.reviews[reviewNum])):
                for word in self.reviews[reviewNum][lineNum]:
                    # Mixture likelihood of the word under all aspects.
                    my_sum = 0
                    for aspect in self.topic_model:
                        my_sum += self.pi[reviewNum][lineNum][aspect] * self.topic_model[aspect][word]
                    for aspect in self.topic_model:
                        self.hidden_parameters[reviewNum][lineNum][word][aspect] = self.pi[reviewNum][lineNum][aspect] * \
                                                                                   self.topic_model[aspect][
                                                                                       word] / my_sum
                    # Posterior of the background component vs. the aspects.
                    self.hidden_parameters_background[reviewNum][lineNum][word] = \
                        (self.lambda_background * self.background_probability[word]) / \
                        (self.lambda_background * self.background_probability[word] + ((1 - self.lambda_background) * my_sum))

    def m_step(self):
        print(type(self).__name__, '- m_step...')
        # Snapshot current pi so compute_cost can measure the update size.
        self.previous_pi = []
        for reviewNum in range(0, len(self.reviews)):
            self.previous_pi.append(list())
            for lineNum in range(0, len(self.reviews[reviewNum])):
                self.previous_pi[reviewNum].append({})
                for aspect in self.topic_model:
                    self.previous_pi[reviewNum][lineNum][aspect] = self.pi[reviewNum][lineNum][aspect]
        # Re-estimate pi: expected (non-background) count per aspect,
        # normalized over all aspects in the sentence.
        for reviewNum in range(0, len(self.reviews)):
            for lineNum in range(0, len(self.reviews[reviewNum])):
                denom = 0
                for aspect in self.topic_model:
                    for word in self.reviews[reviewNum][lineNum]:
                        denom += self.reviews[reviewNum][lineNum][word] * (1 - self.hidden_parameters_background[reviewNum][lineNum][word]) * \
                                 self.hidden_parameters[reviewNum][lineNum][word][aspect]
                # np.save(self.dump_path + "DENOM", denom)
                for aspect in self.topic_model:
                    nom = 0
                    for word in self.reviews[reviewNum][lineNum]:
                        nom += self.reviews[reviewNum][lineNum][word] * (1 - self.hidden_parameters_background[reviewNum][lineNum][word]) * \
                               self.hidden_parameters[reviewNum][lineNum][word][aspect]
                    # np.save(self.dump_path + "NOM", nom)
                    try:
                        self.pi[reviewNum][lineNum][aspect] = nom / denom
                    except:
                        # Division by zero (empty/all-background sentence);
                        # pi is left unchanged for this aspect.
                        print(reviewNum, lineNum, aspect, nom, denom)

    def compute_cost(self):
        """Print/dump the squared L2 distance between current and previous
        pi. NOTE(review): always returns 0.0, so the caller's convergence
        check never sees the real distance — confirm intended."""
        #self.pi = np.load(self.dump_path + "PI_updated.npy")
        dist = 0.0
        for reviewNum in range(0, len(self.reviews)):
            for lineNum in range(0, len(self.reviews[reviewNum])):
                for aspect in self.topic_model:
                    dist = dist + math.pow(self.pi[reviewNum][lineNum][aspect] - self.previous_pi[reviewNum][lineNum][aspect], 2)
        print('dist=' + str(dist))
        np.save(self.dump_path + "MY_DIST", dist)
        return 0.0

    def _dump_hidden_parameters(self):
        """Persist the updated EM state to .npy files for inspection."""
        print(type(self).__name__, '- _dump_hidden_parameters...')
        np.save(self.dump_path + "MY_HP_Updated", self.hidden_parameters)
        np.save(self.dump_path + "MY_HPB_updated", self.hidden_parameters_background)
        np.save(self.dump_path + "MY_PI_updated", self.pi)
        np.save(self.dump_path + "MY_PREVIOUS_PI", self.previous_pi)
| 4,785 | 0 | 189 |
400952750f2e5961c31828c24e96fa7524ba2ebd | 98,704 | py | Python | pygeoutil/ggeo.py | ritviksahajpal/pygeoutil | b3d396c64eb7ecb8090bc6be9765003ce0b18cf5 | [
"MIT"
] | 1 | 2018-02-28T10:02:39.000Z | 2018-02-28T10:02:39.000Z | pygeoutil/ggeo.py | ritviksahajpal/pygeoutil | b3d396c64eb7ecb8090bc6be9765003ce0b18cf5 | [
"MIT"
] | 7 | 2016-08-14T03:54:26.000Z | 2022-03-11T23:17:45.000Z | pygeoutil/ggeo.py | ritviksahajpal/pygeoutil | b3d396c64eb7ecb8090bc6be9765003ce0b18cf5 | [
"MIT"
] | null | null | null | import os
import pdb
import time
import errno
import shapely.wkt
import shapely.ops
from shapely import speedups
import shapely.prepared
import subprocess
import logging
import tempfile
import distutils.version
import atexit
import functools
import math
import numpy
import gdal
import gdalconst
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
LOGGER = logging.getLogger('pygeoprocessing.geoprocessing')
LOGGER.addHandler(logging.NullHandler()) # silence logging by default
_LOGGING_PERIOD = 5.0 # min 5.0 seconds per update log message for the module
_DEFAULT_GTIFF_CREATION_OPTIONS = (
'TILED=YES', 'BIGTIFF=IF_SAFER', 'COMPRESS=LZW')
_LARGEST_ITERBLOCK = 2**20 # largest block for iterblocks to read in cells
# A dictionary to map the resampling method input string to the gdal type
_RESAMPLE_DICT = {
"nearest": gdal.GRA_NearestNeighbour,
"bilinear": gdal.GRA_Bilinear,
"cubic": gdal.GRA_Cubic,
"cubic_spline": gdal.GRA_CubicSpline,
"lanczos": gdal.GRA_Lanczos,
'mode': gdal.GRA_Mode,
'average': gdal.GRA_Average,
}
# GDAL 2.2.3 added a couple of useful interpolation values.
if (distutils.version.LooseVersion(gdal.__version__)
>= distutils.version.LooseVersion('2.2.3')):
_RESAMPLE_DICT.update({
'max': gdal.GRA_Max,
'min': gdal.GRA_Min,
'med': gdal.GRA_Med,
'q1': gdal.GRA_Q1,
'q3': gdal.GRA_Q3,
})
def convert_raster_to_ascii(path_input_raster, path_ascii_output, overwrite=True):
    """Translate a GDAL raster to Arc/Info ASCII grid (AAIGrid) format.

    Args:
        path_input_raster (str): path to a raster readable by GDAL.
        path_ascii_output (str): destination path for the ASCII grid.
        overwrite (bool): when True, delete any pre-existing output first.

    Returns:
        None
    """
    if overwrite and os.path.isfile(path_ascii_output):
        os.remove(path_ascii_output)
    src_ds = gdal.Open(path_input_raster)
    # 'AAIGrid' is the ASCII-grid driver; `gdal_translate --formats` lists all.
    driver = gdal.GetDriverByName('AAIGrid')
    dst_ds = driver.CreateCopy(path_ascii_output, src_ds, 0)
    # Drop both references so GDAL flushes the copy to disk.
    dst_ds = None
    src_ds = None
def get_dataset_type(path_ds):
    """Return the long driver name of a dataset (e.g. 'GeoTIFF').

    Args:
        path_ds (str): path to a GDAL-readable dataset.

    Returns:
        str: the dataset driver's long name.
    """
    ds = gdal.Open(path_ds, gdalconst.GA_ReadOnly)
    driver_name = ds.GetDriver().LongName
    ds = None  # release the dataset handle
    return driver_name
def get_dataset_datatype(path_ds):
    """Return the gdalconst GDT_* code of band 1 of a dataset.

    Args:
        path_ds (str): path to a GDAL-readable dataset.

    Returns:
        the matching gdalconst.GDT_* constant; unrecognised type names fall
        back to gdalconst.GDT_UInt32 (preserving the historical behaviour).
    """
    ds = gdal.Open(path_ds, gdalconst.GA_ReadOnly)
    type_name = gdal.GetDataTypeName(ds.GetRasterBand(1).DataType)
    ds = None  # close dataset
    # Dispatch table replaces the original if/elif chain; semantics identical.
    lookup = {
        'UInt32': gdalconst.GDT_UInt32,
        'UInt16': gdalconst.GDT_UInt16,
        'Float32': gdalconst.GDT_Float32,
        'Float64': gdalconst.GDT_Float64,
        'Int16': gdalconst.GDT_Int16,
        'Int32': gdalconst.GDT_Int32,
        'Unknown': gdalconst.GDT_Unknown,
    }
    return lookup.get(type_name, gdalconst.GDT_UInt32)
def _gdal_to_numpy_type(band):
    """Map a GDAL band's DataType to the equivalent numpy dtype.

    band - GDAL band

    Returns the numpy type matching band.DataType; raises ValueError for
    any type that is neither in the lookup table nor GDT_Byte.
    """
    lookup = {
        gdal.GDT_Int16: numpy.int16,
        gdal.GDT_Int32: numpy.int32,
        gdal.GDT_UInt16: numpy.uint16,
        gdal.GDT_UInt32: numpy.uint32,
        gdal.GDT_Float32: numpy.float32,
        gdal.GDT_Float64: numpy.float64,
    }
    if band.DataType in lookup:
        return lookup[band.DataType]
    # The only type not covered above should be GDT_Byte; double check.
    if band.DataType != gdal.GDT_Byte:
        raise ValueError("Unknown DataType: %s" % str(band.DataType))
    # A byte band may be flagged as signed through image-structure metadata.
    metadata = band.GetMetadata('IMAGE_STRUCTURE')
    if metadata.get('PIXELTYPE') == 'SIGNEDBYTE':
        return numpy.int8
    return numpy.uint8
def get_datatype_from_uri(dataset_uri):
    """Return the GDAL datatype code for band 1 of a dataset.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        datatype: the gdal.GDT_* code of band 1
    """
    ds = gdal.Open(dataset_uri)
    datatype = ds.GetRasterBand(1).DataType
    # Explicitly destroy the swig proxy so the file handle is released now.
    gdal.Dataset.__swig_destroy__(ds)
    ds = None
    return datatype
def get_row_col_from_uri(dataset_uri):
    """Return the (row, column) dimensions of a raster.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        tuple: 2-tuple (n_rows, n_cols) from dataset_uri
    """
    dataset = gdal.Open(dataset_uri)
    n_rows = dataset.RasterYSize
    n_cols = dataset.RasterXSize
    # Make sure the dataset is closed and cleaned up.
    # (Removed a dead `band = None` assignment -- no band was ever opened.)
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return (n_rows, n_cols)
def calculate_raster_stats_uri(dataset_uri):
    """Compute and store min/max/mean/stddev for every band of a raster.

    Args:
        dataset_uri (string): uri to a GDAL raster dataset; the file is
            opened in update mode and the band statistics are written back.

    Returns:
        nothing
    """
    dataset = gdal.Open(dataset_uri, gdal.GA_Update)
    for band_index in range(1, dataset.RasterCount + 1):
        band = dataset.GetRasterBand(band_index)
        # False: compute exact statistics rather than approximations.
        band.ComputeStatistics(False)
    # Close and clean up dataset
    band = None
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
def get_statistics_from_uri(dataset_uri):
    """Fetch (min, max, mean, stddev) for band 1 of a GDAL dataset.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        statistics: tuple of (min, max, mean, stddev)
    """
    dataset = gdal.Open(dataset_uri)
    first_band = dataset.GetRasterBand(1)
    # (0, 1): no approximation allowed; force computation when stats missing.
    statistics = first_band.GetStatistics(0, 1)
    # Close and clean up dataset
    first_band = None
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return statistics
def get_cell_size_from_uri(dataset_uri):
    """Return the cell size of the dataset in meters.

    When the raster is not square, falls back to the mean of the absolute
    x/y pixel sizes instead of raising.

    NOTE(review): an identical function is defined again later in this
    module and shadows this one at import time; one of the two should be
    deleted.

    Args:
        dataset_uri (string): uri to a gdal dataset

    Returns:
        size_meters: cell size of the dataset in meters
    """
    srs = osr.SpatialReference()
    dataset = gdal.Open(dataset_uri)
    if dataset is None:
        raise IOError(
            'File not found or not valid dataset type at: %s' % dataset_uri)
    srs.SetProjection(dataset.GetProjection())
    linear_units = srs.GetLinearUnits()
    geotransform = dataset.GetGeoTransform()
    # take absolute value since sometimes negative widths/heights
    try:
        numpy.testing.assert_approx_equal(
            abs(geotransform[1]), abs(geotransform[5]))
        size_meters = abs(geotransform[1]) * linear_units
    except AssertionError:
        # Non-square pixels: average the two dimensions.
        # (Removed the unused `as e` binding.)
        size_meters = (
            abs(geotransform[1]) + abs(geotransform[5])) / 2.0 * linear_units
    # Close and clean up dataset
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return size_meters
def get_rat_as_dictionary_uri(dataset_uri):
    """Return the raster attribute table (RAT) of band 1 as a dictionary.

    Args:
        dataset_uri: uri of a GDAL dataset whose first band has a RAT

    Returns:
        value (dictionary): a 2D dictionary keyed first by column name and
            second by row number
    """
    dataset = gdal.Open(dataset_uri)
    rat_dict = get_rat_as_dictionary(dataset)
    # Explicitly destroy the swig proxy so the file handle is released.
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return rat_dict
def get_rat_as_dictionary(dataset):
    """Return the RAT of the first band of dataset as a dictionary.

    Args:
        dataset: a GDAL dataset that has a RAT associated with the first band

    Returns:
        rat_dictionary (dictionary): a 2D dictionary where the first key is
            the column name and second is the row number
    """
    # Bug fixes: (1) removed a leftover debugging `pdb.set_trace()`;
    # (2) GetDefaultRAT() was called on the band AND again on its result,
    # which is already the RAT and has no such method.
    band = dataset.GetRasterBand(1)
    rat = band.GetDefaultRAT()
    n_columns = rat.GetColumnCount()
    n_rows = rat.GetRowCount()
    rat_dictionary = {}
    for col_index in range(n_columns):
        # Initialize an empty list to store row data and figure out the
        # type of data stored in that column.
        col_type = rat.GetTypeOfCol(col_index)
        col_name = rat.GetNameOfCol(col_index)
        rat_dictionary[col_name] = []
        # Now burn through all the rows to populate the column
        for row_index in range(n_rows):
            # Handle the 3 known gdal RAT field types.
            if col_type == gdal.GFT_Integer:
                value = rat.GetValueAsInt(row_index, col_index)
            elif col_type == gdal.GFT_Real:
                value = rat.GetValueAsDouble(row_index, col_index)
            else:
                # If the type is not int or real, default to a string; this is
                # better than testing for a string and raising if not.
                value = rat.GetValueAsString(row_index, col_index)
            rat_dictionary[col_name].append(value)
    return rat_dictionary
def get_raster_properties_uri(dataset_uri):
    """Open dataset_uri and return get_raster_properties() of the dataset.

    Args:
        dataset_uri (string): a URI to a GDAL raster dataset

    Returns:
        value (dictionary): properties under keys 'width' (w-e pixel
            resolution), 'height' (n-s pixel resolution), 'x_size', 'y_size'
    """
    dataset = gdal.Open(dataset_uri)
    properties = get_raster_properties(dataset)
    # Make sure the dataset is closed and cleaned up.
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return properties
def get_raster_properties(dataset):
    """Collect basic size and resolution properties of a GDAL dataset.

    Args:
        dataset: a GDAL raster dataset to get the properties from

    Returns:
        dictionary with keys: 'width' (w-e pixel resolution), 'height'
        (n-s pixel resolution), 'x_size', 'y_size'
    """
    geo_transform = dataset.GetGeoTransform()
    first_band = dataset.GetRasterBand(1)
    return {
        'width': float(geo_transform[1]),
        'height': float(geo_transform[5]),
        'x_size': first_band.XSize,
        'y_size': first_band.YSize,
    }
def get_nodata_from_uri(dataset_uri):
    """Return the nodata value of band 1, cast to its correct numpy type.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        nodata value for dataset band 1, or None when the band defines no
        nodata value
    """
    dataset = gdal.Open(dataset_uri)
    band = dataset.GetRasterBand(1)
    nodata = band.GetNoDataValue()
    # Cast through the band's numpy dtype so callers can compare the value
    # directly against pixel arrays. (Removed a dead `else: pass` branch.)
    if nodata is not None:
        nodata = _gdal_to_numpy_type(band)(nodata)
    band = None
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return nodata
def reclassify(rasterio_rst, reclass_list, output_filename, band=1, creation_options=None):
    """Reclassify ranges of raster values to new values via rasterio.

    The reclass list is a simple list of lists with the following formatting:
        [[begin_range, end_range, new_value]]
        ie. [ [ 1,3,5 ],[ 3,4,6 ] ]
            * which converts values 1 to 2.99999999 to 5
                and values 3 to 3.99999999 to 6
                all other values stay the same.

    arguments:
        rasterio_rst = raster image instance from rasterio package
        reclass_list = list of reclassification values * see explanation
        band = integer marking which band you want to return from the raster
                default is 1.
        creation_options = gdal style creation options, but in the rasterio
            implementation: a dict where the key is the name of the gdal -co
            and the value is the value passed to that flag, i.e.
            ["COMPRESS=LZW"] becomes dict([('compress','lzw')]).
            Defaults to None (was a mutable `dict()` default).
    """
    import rasterio
    meta = rasterio_rst.meta
    # Bug fix: the original tested `len(creation_options) < 0`, which can
    # never be true, so user-supplied creation options were silently ignored.
    if creation_options:
        meta.update(creation_options)
    with rasterio.open(output_filename, mode='w', **meta) as out_rst:
        # `.data` strips the mask -- gotcha noted by the original author.
        band_arr = rasterio_rst.read_band(band).data
        for begin_range, end_range, new_value in reclass_list:
            band_arr[numpy.logical_and(
                band_arr >= begin_range, band_arr < end_range)] = new_value
        out_rst.write_band(band, band_arr)
    return rasterio.open(output_filename)
def get_cell_size_from_uri(dataset_uri):
    """Get the cell size of a dataset in units of meters.

    When the raster is not square, falls back to the mean of the absolute
    x/y pixel sizes instead of raising.

    NOTE(review): this duplicates an identical function defined earlier in
    this module; this later definition wins at import time. One of the two
    should be removed.

    Args:
        dataset_uri (string): uri to a gdal dataset

    Returns:
        size_meters: cell size of the dataset in meters
    """
    srs = osr.SpatialReference()
    dataset = gdal.Open(dataset_uri)
    if dataset is None:
        raise IOError(
            'File not found or not valid dataset type at: %s' % dataset_uri)
    srs.SetProjection(dataset.GetProjection())
    linear_units = srs.GetLinearUnits()
    geotransform = dataset.GetGeoTransform()
    # take absolute value since sometimes negative widths/heights
    try:
        numpy.testing.assert_approx_equal(
            abs(geotransform[1]), abs(geotransform[5]))
        size_meters = abs(geotransform[1]) * linear_units
    except AssertionError:
        # Non-square pixels: average the two dimensions.
        # (Removed the unused `as e` binding.)
        size_meters = (
            abs(geotransform[1]) + abs(geotransform[5])) / 2.0 * linear_units
    # Close and clean up dataset
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return size_meters
def reclassify_dataset_uri(
        dataset_uri, value_map, raster_out_uri, out_datatype, out_nodata,
        exception_flag='values_required', assert_dataset_projected=True):
    """Reclassify values in a dataset.

    A function to reclassify values in dataset to any output type. By default
    the values except for nodata must be in value_map.

    Args:
        dataset_uri (string): a uri to a gdal dataset
        value_map (dictionary): a dictionary of values of
            {source_value: dest_value, ...}
            where source_value's type is a postive integer type and dest_value
            is of type out_datatype.
        raster_out_uri (string): the uri for the output raster
        out_datatype (gdal type): the type for the output dataset
        out_nodata (numerical type): the nodata value for the output raster.
            Must be the same type as out_datatype

    Keyword Args:
        exception_flag (string): either 'none' or 'values_required'.
            If 'values_required' raise an exception if there is a value in the
            raster that is not found in value_map
        assert_dataset_projected (boolean): if True this operation will
            test if the input dataset is not projected and raise an exception
            if so.

    Returns:
        nothing

    Raises:
        Exception: if exception_flag == 'values_required' and the value from
            'key_raster' is not a key in 'attr_dict'
    """
    if exception_flag not in ['none', 'values_required']:
        # Bug fix: the message was previously passed logging-style as
        # ValueError('... %s', flag) and was never interpolated.
        raise ValueError('unknown exception_flag %s' % exception_flag)
    values_required = exception_flag == 'values_required'

    nodata = get_nodata_from_uri(dataset_uri)
    value_map_copy = value_map.copy()
    # possible that nodata value is not defined, so test for None first
    # otherwise if nodata not predefined, remap it into the dictionary
    if nodata is not None and nodata not in value_map_copy:
        value_map_copy[nodata] = out_nodata
    keys = sorted(value_map_copy.keys())
    values = numpy.array([value_map_copy[x] for x in keys])

    def map_dataset_to_value(original_values):
        """Convert a block of original values to the lookup values."""
        if values_required:
            unique = numpy.unique(original_values)
            has_map = numpy.in1d(unique, keys)
            if not all(has_map):
                raise ValueError(
                    'There was not a value for at least the following codes '
                    '%s for this file %s.\nNodata value is: %s' % (
                        str(unique[~has_map]), dataset_uri, str(nodata)))
        # digitize(right=True) maps each pixel to the index of its key in the
        # sorted key list; indexing `values` performs the reclassification.
        index = numpy.digitize(original_values.ravel(), keys, right=True)
        return values[index].reshape(original_values.shape)

    out_pixel_size = get_cell_size_from_uri(dataset_uri)
    vectorize_datasets(
        [dataset_uri], map_dataset_to_value,
        raster_out_uri, out_datatype, out_nodata, out_pixel_size,
        "intersection", dataset_to_align_index=0,
        vectorize_op=False, assert_datasets_projected=assert_dataset_projected,
        datasets_are_pre_aligned=True)
def clip_dataset_uri(
        source_dataset_uri, aoi_datasource_uri, out_dataset_uri,
        assert_projections=True, process_pool=None, all_touched=False):
    """Clip a raster to the bounding box of a vector AOI.

    The output covers the intersection of the raster and the AOI polygons;
    pixels outside the AOI are set to the source raster's nodata value.

    Args:
        source_dataset_uri (string): uri to single band GDAL dataset to clip
        aoi_datasource_uri (string): uri to ogr datasource
        out_dataset_uri (string): path to disk for the clipped dataset

    Keyword Args:
        assert_projections (boolean): whether the datasets must be projected
        process_pool: a process pool for multiprocessing
        all_touched (boolean): if true, pass ALL_TOUCHED=TRUE to
            RasterizeLayer when building the AOI mask.

    Returns:
        None
    """
    src = gdal.Open(source_dataset_uri)
    src_band = src.GetRasterBand(1)
    nodata = src_band.GetNoDataValue()
    datatype = src_band.DataType
    if nodata is None:
        nodata = -9999
    gdal.Dataset.__swig_destroy__(src)
    src = None

    pixel_size = get_raster_info(source_dataset_uri)['mean_pixel_size']
    # Identity pixel op: the clipping itself is done by the AOI mask.
    vectorize_datasets(
        [source_dataset_uri], lambda x: x, out_dataset_uri, datatype, nodata,
        pixel_size, 'intersection', aoi_uri=aoi_datasource_uri,
        assert_datasets_projected=assert_projections,
        process_pool=process_pool, vectorize_op=False, all_touched=all_touched)
def get_raster_info(raster_path):
    """Get information about a GDAL raster dataset.

    Parameters:
        raster_path (String): a path to a GDAL raster.

    Returns:
        raster_properties (dictionary) with keys:
            'pixel_size' (tuple): (pixel x-size, pixel y-size),
            'mean_pixel_size' (float): mean of the absolute pixel sizes,
            'raster_size' (tuple): raster pixels in (x, y) direction,
            'nodata': band 1's nodata value when single band, else a list of
                nodata values in increasing band index,
            'n_bands' (int): number of bands,
            'bounding_box' (list): [x origin, y origin, x far, y far] in
                projected coordinates,
            'geotransform' (tuple): the 6-tuple geotransform,
            'datatype' (int): the gdal.GDT_* datatype of the raster.
    """
    raster = gdal.Open(raster_path)
    gt = raster.GetGeoTransform()
    first_band = raster.GetRasterBand(1)
    n_cols = first_band.XSize
    n_rows = first_band.YSize
    n_bands = raster.RasterCount
    nodata_list = [
        raster.GetRasterBand(index).GetNoDataValue()
        for index in range(1, n_bands + 1)]
    raster_properties = {
        'pixel_size': (gt[1], gt[5]),
        'mean_pixel_size': (abs(gt[1]) + abs(gt[5])) / 2.0,
        'raster_size': (n_cols, n_rows),
        'n_bands': n_bands,
        # Scalar nodata for single-band rasters, list otherwise.
        'nodata': nodata_list[0] if len(nodata_list) == 1 else nodata_list,
        'bounding_box': [
            gt[0], gt[3],
            gt[0] + n_cols * gt[1],
            gt[3] + n_rows * gt[5]],
        'geotransform': gt,
        # The datatype is uniform across the raster but stored per band.
        'datatype': first_band.DataType,
    }
    raster = None
    return raster_properties
def vectorize_datasets(
        dataset_uri_list, dataset_pixel_op, dataset_out_uri, datatype_out,
        nodata_out, pixel_size_out, bounding_box_mode,
        resample_method_list=None, dataset_to_align_index=None,
        dataset_to_bound_index=None, aoi_uri=None,
        assert_datasets_projected=True, process_pool=None, vectorize_op=True,
        datasets_are_pre_aligned=False, dataset_options=None,
        all_touched=False):
    """Apply local raster operation on stack of datasets.

    This function applies a user defined function across a stack of
    datasets.  It has functionality align the output dataset grid
    with one of the input datasets, output a dataset that is the union
    or intersection of the input dataset bounding boxes, and control
    over the interpolation techniques of the input datasets, if
    necessary.  The datasets in dataset_uri_list must be in the same
    projection; the function will raise an exception if not.

    Args:
        dataset_uri_list (list): a list of file uris that point to files that
            can be opened with gdal.Open.
        dataset_pixel_op (function) a function that must take in as many
            arguments as there are elements in dataset_uri_list.  The arguments
            can be treated as interpolated or actual pixel values from the
            input datasets and the function should calculate the output
            value for that pixel stack.  The function is a parallel
            paradigmn and does not know the spatial position of the
            pixels in question at the time of the call.  If the
            `bounding_box_mode` parameter is "union" then the values
            of input dataset pixels that may be outside their original
            range will be the nodata values of those datasets.  Known
            bug: if dataset_pixel_op does not return a value in some cases
            the output dataset values are undefined even if the function
            does not crash or raise an exception.
        dataset_out_uri (string): the uri of the output dataset.  The
            projection will be the same as the datasets in dataset_uri_list.
        datatype_out: the GDAL output type of the output dataset
        nodata_out: the nodata value of the output dataset.
        pixel_size_out: the pixel size of the output dataset in
            projected coordinates.
        bounding_box_mode (string): one of "union" or "intersection",
            "dataset". If union the output dataset bounding box will be the
            union of the input datasets.  Will be the intersection otherwise.
            An exception is raised if the mode is "intersection" and the
            input datasets have an empty intersection. If dataset it will make
            a bounding box as large as the given dataset, if given
            dataset_to_bound_index must be defined.

    Keyword Args:
        resample_method_list (list): a list of resampling methods
            for each output uri in dataset_out_uri list.  Each element
            must be one of "nearest|bilinear|cubic|cubic_spline|lanczos".
            If None, the default is "nearest" for all input datasets.
        dataset_to_align_index (int): an int that corresponds to the position
            in one of the dataset_uri_lists that, if positive aligns the output
            rasters to fix on the upper left hand corner of the output
            datasets.  If negative, the bounding box aligns the intersection/
            union without adjustment.
        dataset_to_bound_index: if mode is "dataset" this indicates which
            dataset should be the output size.
        aoi_uri (string): a URI to an OGR datasource to be used for the
            aoi.  Irrespective of the `mode` input, the aoi will be used
            to intersect the final bounding box.
        assert_datasets_projected (boolean): if True this operation will
            test if any datasets are not projected and raise an exception
            if so.
        process_pool: a process pool for multiprocessing
        vectorize_op (boolean): if true the model will try to numpy.vectorize
            dataset_pixel_op.  If dataset_pixel_op is designed to use maximize
            array broadcasting, set this parameter to False, else it may
            inefficiently invoke the function on individual elements.
        datasets_are_pre_aligned (boolean): If this value is set to False
            this operation will first align and interpolate the input datasets
            based on the rules provided in bounding_box_mode,
            resample_method_list, dataset_to_align_index, and
            dataset_to_bound_index, if set to True the input dataset list must
            be aligned, probably by raster_utils.align_dataset_list
        dataset_options: this is an argument list that will be
            passed to the GTiff driver.  Useful for blocksizes, compression,
            etc.
        all_touched (boolean): if true the clip uses the option
            ALL_TOUCHED=TRUE when calling RasterizeLayer for AOI masking.

    Returns:
        None

    Raises:
        ValueError: invalid input provided
    """
    if not isinstance(dataset_uri_list, list):
        raise ValueError(
            "dataset_uri_list was not passed in as a list, maybe a single "
            "file was passed in?  Here is its value: %s" %
            (str(dataset_uri_list)))

    if aoi_uri is None:
        assert_file_existance(dataset_uri_list)
    else:
        assert_file_existance(dataset_uri_list + [aoi_uri])

    if dataset_out_uri in dataset_uri_list:
        raise ValueError(
            "%s is used as an output file, but it is also an input file "
            "in the input list %s" % (dataset_out_uri, str(dataset_uri_list)))

    valid_bounding_box_modes = ["union", "intersection", "dataset"]
    if bounding_box_mode not in valid_bounding_box_modes:
        # Bug fix: the message was previously passed logging-style as extra
        # ValueError args and was never interpolated into the message.
        raise ValueError(
            "Unknown bounding box mode %s; should be one of %s" % (
                bounding_box_mode, valid_bounding_box_modes))

    # Create a temporary list of filenames whose files delete on the python
    # interpreter exit
    if not datasets_are_pre_aligned:
        # Handle the cases where optional arguments are passed in
        if resample_method_list is None:
            resample_method_list = ["nearest"] * len(dataset_uri_list)
        if dataset_to_align_index is None:
            dataset_to_align_index = -1
        dataset_out_uri_list = [
            temporary_filename(suffix='.tif') for _ in dataset_uri_list]
        # Align and resample the datasets, then load datasets into a list
        align_dataset_list(
            dataset_uri_list, dataset_out_uri_list, resample_method_list,
            pixel_size_out, bounding_box_mode, dataset_to_align_index,
            dataset_to_bound_index=dataset_to_bound_index,
            aoi_uri=aoi_uri,
            assert_datasets_projected=assert_datasets_projected,
            all_touched=all_touched)
        aligned_datasets = [
            gdal.Open(filename, gdal.GA_ReadOnly) for filename in
            dataset_out_uri_list]
    else:
        # otherwise the input datasets are already aligned
        aligned_datasets = [
            gdal.Open(filename, gdal.GA_ReadOnly) for filename in
            dataset_uri_list]

    aligned_bands = [dataset.GetRasterBand(1) for dataset in aligned_datasets]

    n_rows = aligned_datasets[0].RasterYSize
    n_cols = aligned_datasets[0].RasterXSize

    output_dataset = new_raster_from_base(
        aligned_datasets[0], dataset_out_uri, 'GTiff', nodata_out,
        datatype_out, dataset_options=dataset_options)
    output_band = output_dataset.GetRasterBand(1)
    block_size = output_band.GetBlockSize()
    # makes sense to get the largest block size possible to reduce the number
    # of expensive readasarray calls
    for current_block_size in [band.GetBlockSize() for band in aligned_bands]:
        if (current_block_size[0] * current_block_size[1] >
                block_size[0] * block_size[1]):
            block_size = current_block_size

    cols_per_block, rows_per_block = block_size[0], block_size[1]
    n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
    n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))

    # If there's an AOI, rasterize it to a 0/1 byte mask aligned with the
    # output grid; pixels where the mask is 0 become nodata_out below.
    if aoi_uri is not None:
        mask_uri = temporary_filename(suffix='.tif')
        mask_dataset = new_raster_from_base(
            aligned_datasets[0], mask_uri, 'GTiff', 255, gdal.GDT_Byte,
            fill_value=0, dataset_options=dataset_options)
        mask_band = mask_dataset.GetRasterBand(1)
        aoi_datasource = ogr.Open(aoi_uri)
        aoi_layer = aoi_datasource.GetLayer()
        if all_touched:
            option_list = ["ALL_TOUCHED=TRUE"]
        else:
            option_list = []
        gdal.RasterizeLayer(
            mask_dataset, [1], aoi_layer, burn_values=[1], options=option_list)
        aoi_layer = None
        aoi_datasource = None

    # We only want to do this if requested, otherwise we might have a more
    # efficient call if we don't vectorize.
    if vectorize_op:
        dataset_pixel_op = numpy.vectorize(
            dataset_pixel_op, otypes=[_gdal_to_numpy_type(output_band)])

    last_time = time.time()

    last_row_block_width = None
    last_col_block_width = None
    for row_block_index in range(n_row_blocks):
        row_offset = row_block_index * rows_per_block
        row_block_width = n_rows - row_offset
        if row_block_width > rows_per_block:
            row_block_width = rows_per_block

        for col_block_index in range(n_col_blocks):
            col_offset = col_block_index * cols_per_block
            col_block_width = n_cols - col_offset
            if col_block_width > cols_per_block:
                col_block_width = cols_per_block

            # NOTE(review): this timer only advances `last_time`; the
            # original progress-logging call appears to have been removed.
            current_time = time.time()
            if current_time - last_time > 5.0:
                last_time = current_time

            # Reallocate the read buffers only when the block shape changes
            # (true at least once since last_* are initialized with None).
            if (last_row_block_width != row_block_width or
                    last_col_block_width != col_block_width):
                dataset_blocks = [
                    numpy.zeros(
                        (row_block_width, col_block_width),
                        dtype=_gdal_to_numpy_type(band)) for band in aligned_bands]

                if aoi_uri is not None:
                    mask_array = numpy.zeros(
                        (row_block_width, col_block_width), dtype=numpy.int8)

                last_row_block_width = row_block_width
                last_col_block_width = col_block_width

            for dataset_index in range(len(aligned_bands)):
                aligned_bands[dataset_index].ReadAsArray(
                    xoff=col_offset, yoff=row_offset,
                    win_xsize=col_block_width,
                    win_ysize=row_block_width,
                    buf_obj=dataset_blocks[dataset_index])

            out_block = dataset_pixel_op(*dataset_blocks)

            # Mask out pixels that fall outside the AOI polygons
            if aoi_uri is not None:
                mask_band.ReadAsArray(
                    xoff=col_offset, yoff=row_offset,
                    win_xsize=col_block_width,
                    win_ysize=row_block_width,
                    buf_obj=mask_array)
                out_block[mask_array == 0] = nodata_out

            output_band.WriteArray(
                out_block[0:row_block_width, 0:col_block_width],
                xoff=col_offset, yoff=row_offset)

    # Making sure the band and dataset is flushed and not in memory before
    # adding stats
    output_band.FlushCache()
    output_band = None
    output_dataset.FlushCache()
    gdal.Dataset.__swig_destroy__(output_dataset)
    output_dataset = None

    # Clean up the files made by temporary file because we had an issue once
    # where I was running the water yield model over 2000 times and it made
    # so many temporary files I ran out of disk space.
    if aoi_uri is not None:
        mask_band = None
        gdal.Dataset.__swig_destroy__(mask_dataset)
        mask_dataset = None
        os.remove(mask_uri)
    aligned_bands = None
    for dataset in aligned_datasets:
        gdal.Dataset.__swig_destroy__(dataset)
    aligned_datasets = None
    if not datasets_are_pre_aligned:
        # if they weren't pre-aligned then we have temporary files to remove
        for temp_dataset_uri in dataset_out_uri_list:
            try:
                os.remove(temp_dataset_uri)
            except OSError:
                pass
    calculate_raster_stats_uri(dataset_out_uri)
def assert_file_existance(dataset_uri_list):
    """Raise IOError unless every path in dataset_uri_list exists.

    Args:
        dataset_uri_list (list): a list of relative or absolute file paths to
            validate

    Returns:
        None

    Raises:
        IOError: if any files are not found; the message lists them all
    """
    missing = [uri for uri in dataset_uri_list if not os.path.exists(uri)]
    if missing:
        raise IOError(
            "The following files do not exist on the filesystem: " +
            str(missing))
def temporary_filename(suffix=''):
    """Create an empty temp file and return its path; deleted at exit.

    The file is created via mkstemp and its removal is registered with
    atexit, so it persists for the life of the interpreter only.

    Keyword Args:
        suffix (string): the suffix to be appended to the temporary file

    Returns:
        path: a unique temporary filename
    """
    handle, path = tempfile.mkstemp(suffix=suffix)
    os.close(handle)

    def _cleanup(target_path):
        """Best-effort removal, registered with atexit."""
        try:
            os.remove(target_path)
        except OSError:
            # Already gone (e.g. the caller deleted it first) -- that's fine.
            pass

    atexit.register(_cleanup, path)
    return path
def new_raster_from_base_uri(base_uri, *args, **kwargs):
    """A wrapper for new_raster_from_base that opens up base_uri first.

    base_uri - a URI to a GDAL dataset on disk.

    All other arguments to new_raster_from_base are passed in.

    Returns nothing.
    """
    base_raster = gdal.Open(base_uri)
    if base_raster is None:
        # Bug fix: the original format string never received base_uri, so the
        # error message contained a literal '%s'.
        raise IOError("%s not found when opening GDAL raster" % base_uri)
    new_raster = new_raster_from_base(base_raster, *args, **kwargs)
    gdal.Dataset.__swig_destroy__(new_raster)
    gdal.Dataset.__swig_destroy__(base_raster)
    new_raster = None
    base_raster = None
def new_raster_from_base(
        base, output_uri, gdal_format, nodata, datatype, fill_value=None,
        n_rows=None, n_cols=None, dataset_options=None):
    """Create a new, empty GDAL raster dataset with the spatial references,
    geotranforms of the base GDAL raster dataset.

    base - a the GDAL raster dataset to base output size, and transforms on
    output_uri - a string URI to the new output raster dataset.
    gdal_format - a string representing the GDAL file format of the
        output raster.  See http://gdal.org/formats_list.html for a list
        of available formats.  This parameter expects the format code, such
        as 'GTiff' or 'MEM'
    nodata - a value that will be set as the nodata value for the
        output raster.  Should be the same type as 'datatype'
    datatype - the pixel datatype of the output raster, for example
        gdal.GDT_Float32.  See the following header file for supported
        pixel types:
        http://www.gdal.org/gdal_8h.html#22e22ce0a55036a96f652765793fb7a4
    fill_value - (optional) the value to fill in the raster on creation
    n_rows - (optional) if set makes the resulting raster have n_rows in it
        if not, the number of rows of the outgoing dataset are equal to
        the base.
    n_cols - (optional) similar to n_rows, but for the columns.
    dataset_options - (optional) a list of dataset options that gets
        passed to the gdal creation driver, overrides defaults

    returns a new GDAL raster dataset.

    (Idiom fixes: `== None` / `!= None` replaced with identity tests; a
    dead `else: pass` branch removed.  Behaviour is unchanged.)
    """
    # This might be a numpy type coming in, set it to native python type
    try:
        nodata = nodata.item()
    except AttributeError:
        pass
    if n_rows is None:
        n_rows = base.RasterYSize
    if n_cols is None:
        n_cols = base.RasterXSize
    projection = base.GetProjection()
    geotransform = base.GetGeoTransform()
    driver = gdal.GetDriverByName(gdal_format)

    base_band = base.GetRasterBand(1)
    block_size = base_band.GetBlockSize()
    metadata = base_band.GetMetadata('IMAGE_STRUCTURE')
    base_band = None

    if dataset_options is None:
        # make a new list to make sure we aren't ailiasing one passed in
        dataset_options = []
        # first, should it be tiled?  yes if it's not striped
        if block_size[0] != n_cols:
            # just do 256x256 blocks
            dataset_options = [
                'TILED=YES',
                'BLOCKXSIZE=256',
                'BLOCKYSIZE=256',
                'BIGTIFF=IF_SAFER']
        if 'PIXELTYPE' in metadata:
            dataset_options.append('PIXELTYPE=' + metadata['PIXELTYPE'])

    new_raster = driver.Create(
        output_uri.encode('utf-8'), n_cols, n_rows, 1, datatype,
        options=dataset_options)
    new_raster.SetProjection(projection)
    new_raster.SetGeoTransform(geotransform)
    band = new_raster.GetRasterBand(1)

    if nodata is not None:
        band.SetNoDataValue(nodata)

    if fill_value is not None:
        band.Fill(fill_value)
    elif nodata is not None:
        band.Fill(nodata)
    band = None

    return new_raster
def get_bounding_box(dataset_uri):
    """Get bounding box where coordinates are in projected units.

    Args:
        dataset_uri (string): a uri to a GDAL dataset

    Returns:
        bounding_box (list):
            [upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
            projected coordinates
    """
    raster = gdal.Open(dataset_uri)
    geotransform = raster.GetGeoTransform()
    x_size = raster.RasterXSize
    y_size = raster.RasterYSize

    # corners follow from the affine geotransform: origin plus
    # (pixel count * pixel size) along each axis
    upper_left_x = geotransform[0]
    upper_left_y = geotransform[3]
    lower_right_x = upper_left_x + x_size * geotransform[1]
    lower_right_y = upper_left_y + y_size * geotransform[5]

    # Close and cleanup dataset
    gdal.Dataset.__swig_destroy__(raster)
    raster = None

    return [upper_left_x, upper_left_y, lower_right_x, lower_right_y]
def align_dataset_list(
        dataset_uri_list, dataset_out_uri_list, resample_method_list,
        out_pixel_size, mode, dataset_to_align_index,
        dataset_to_bound_index=None, aoi_uri=None,
        assert_datasets_projected=True, all_touched=False):
    """Create a new list of datasets that are aligned based on a list of
        inputted datasets.

    Take a list of dataset uris and generates a new set that is completely
    aligned with identical projections and pixel sizes.

    Args:
        dataset_uri_list (list): a list of input dataset uris
        dataset_out_uri_list (list): a parallel dataset uri list whose
            positions correspond to entries in dataset_uri_list
        resample_method_list (list): a list of resampling methods for each
            output uri in dataset_out_uri list.  Each element must be one of
            "nearest|bilinear|cubic|cubic_spline|lanczos"
        out_pixel_size: the output pixel size
        mode (string): one of "union", "intersection", or "dataset" which
            defines how the output output extents are defined as either the
            union or intersection of the input datasets or to have the same
            bounds as an existing raster.  If mode is "dataset" then
            dataset_to_bound_index must be defined
        dataset_to_align_index (int): an int that corresponds to the position
            in one of the dataset_uri_lists that, if positive aligns the output
            rasters to fix on the upper left hand corner of the output
            datasets.  If negative, the bounding box aligns the intersection/
            union without adjustment.
        all_touched (boolean): if True and an AOI is passed, the
            ALL_TOUCHED=TRUE option is passed to the RasterizeLayer function
            when determining the mask of the AOI.

    Keyword Args:
        dataset_to_bound_index: if mode is "dataset" then this index is
            used to indicate which dataset to define the output bounds of the
            dataset_out_uri_list
        aoi_uri (string): a URI to an OGR datasource to be used for the
            aoi.  Irrespective of the `mode` input, the aoi will be used
            to intersect the final bounding box.
        assert_datasets_projected (boolean): if True, every input dataset is
            checked via assert_datasets_in_same_projection before alignment.

    Returns:
        None
    """
    import functools
    last_time = time.time()

    # make sure that the input lists are of the same length; reduce returns
    # the common length if all three agree, or False otherwise
    list_lengths = [
        len(dataset_uri_list), len(dataset_out_uri_list),
        len(resample_method_list)]
    if not functools.reduce(lambda x, y: x if x == y else False, list_lengths):
        raise Exception(
            "dataset_uri_list, dataset_out_uri_list, and "
            "resample_method_list must be the same length "
            " current lengths are %s" % (str(list_lengths)))

    if assert_datasets_projected:
        assert_datasets_in_same_projection(dataset_uri_list)
    if mode not in ["union", "intersection", "dataset"]:
        raise Exception("Unknown mode %s" % (str(mode)))

    if dataset_to_align_index >= len(dataset_uri_list):
        raise Exception(
            "Alignment index is out of bounds of the datasets index: %s"
            "n_elements %s" % (dataset_to_align_index, len(dataset_uri_list)))
    if mode == "dataset" and dataset_to_bound_index is None:
        raise Exception(
            "Mode is 'dataset' but dataset_to_bound_index is not defined")
    if mode == "dataset" and (dataset_to_bound_index < 0 or
                              dataset_to_bound_index >= len(dataset_uri_list)):
        raise Exception(
            "dataset_to_bound_index is out of bounds of the datasets index: %s"
            "n_elements %s" % (dataset_to_bound_index, len(dataset_uri_list)))

    def merge_bounding_boxes(bb1, bb2, mode):
        """Helper function to merge two bounding boxes through union or
            intersection.

        Boxes are [ulx, uly, lrx, lry]; 'union' grows the box outward while
        'intersection' shrinks it inward.  The lambdas are elementwise
        min/max selectors.
        """
        less_than_or_equal = lambda x, y: x if x <= y else y
        greater_than = lambda x, y: x if x > y else y

        if mode == "union":
            comparison_ops = [
                less_than_or_equal, greater_than, greater_than,
                less_than_or_equal]
        if mode == "intersection":
            comparison_ops = [
                greater_than, less_than_or_equal, less_than_or_equal,
                greater_than]

        bb_out = [op(x, y) for op, x, y in zip(comparison_ops, bb1, bb2)]
        return bb_out

    # get the intersecting or unioned bounding box
    if mode == "dataset":
        bounding_box = get_bounding_box(
            dataset_uri_list[dataset_to_bound_index])
    else:
        bounding_box = functools.reduce(
            functools.partial(merge_bounding_boxes, mode=mode),
            [get_bounding_box(dataset_uri) for dataset_uri in dataset_uri_list])

    if aoi_uri is not None:
        # regardless of mode, the AOI always clips the final bounding box
        bounding_box = merge_bounding_boxes(
            bounding_box, get_datasource_bounding_box(aoi_uri), "intersection")

    # in [ulx, uly, lrx, lry] form (north-up), an empty box has ulx >= lrx
    # or uly <= lry
    if (bounding_box[0] >= bounding_box[2] or
            bounding_box[1] <= bounding_box[3]) and mode == "intersection":
        raise Exception("The datasets' intersection is empty "
                        "(i.e., not all the datasets touch each other).")

    if dataset_to_align_index >= 0:
        # bounding box needs alignment: snap the upper-left corner onto the
        # pixel grid of the alignment dataset
        align_bounding_box = get_bounding_box(
            dataset_uri_list[dataset_to_align_index])
        align_pixel_size = get_cell_size_from_uri(
            dataset_uri_list[dataset_to_align_index])

        for index in [0, 1]:
            n_pixels = int(
                (bounding_box[index] - align_bounding_box[index]) /
                float(align_pixel_size))
            bounding_box[index] = \
                n_pixels * align_pixel_size + align_bounding_box[index]

    for original_dataset_uri, out_dataset_uri, resample_method, index in zip(
            dataset_uri_list, dataset_out_uri_list, resample_method_list,
            range(len(dataset_uri_list))):
        current_time = time.time()
        # timer stub (progress reporting appears to have been stripped);
        # only resets the 5-second reference point
        if current_time - last_time > 5.0:
            last_time = current_time
        resize_and_resample_dataset_uri(
            original_dataset_uri, bounding_box, out_pixel_size,
            out_dataset_uri, resample_method)

    # If there's an AOI, mask it out
    if aoi_uri is not None:
        first_dataset = gdal.Open(dataset_out_uri_list[0])
        n_rows = first_dataset.RasterYSize
        n_cols = first_dataset.RasterXSize
        gdal.Dataset.__swig_destroy__(first_dataset)
        first_dataset = None

        # rasterize the AOI into a 0/1 byte mask the size of the outputs
        mask_uri = temporary_filename(suffix='.tif')
        new_raster_from_base_uri(
            dataset_out_uri_list[0], mask_uri, 'GTiff', 255, gdal.GDT_Byte,
            fill_value=0)

        mask_dataset = gdal.Open(mask_uri, gdal.GA_Update)
        mask_band = mask_dataset.GetRasterBand(1)
        aoi_datasource = ogr.Open(aoi_uri)
        aoi_layer = aoi_datasource.GetLayer()
        if all_touched:
            option_list = ["ALL_TOUCHED=TRUE"]
        else:
            option_list = []
        gdal.RasterizeLayer(
            mask_dataset, [1], aoi_layer, burn_values=[1], options=option_list)
        mask_row = numpy.zeros((1, n_cols), dtype=numpy.int8)

        out_dataset_list = [
            gdal.Open(uri, gdal.GA_Update) for uri in dataset_out_uri_list]
        out_band_list = [
            dataset.GetRasterBand(1) for dataset in out_dataset_list]
        nodata_out_list = [
            get_nodata_from_uri(uri) for uri in dataset_out_uri_list]

        # row by row, overwrite pixels outside the AOI (mask == 0) with each
        # output's nodata value
        for row_index in range(n_rows):
            mask_row = (mask_band.ReadAsArray(
                0, row_index, n_cols, 1) == 0)
            for out_band, nodata_out in zip(out_band_list, nodata_out_list):
                dataset_row = out_band.ReadAsArray(
                    0, row_index, n_cols, 1)
                out_band.WriteArray(
                    numpy.where(mask_row, nodata_out, dataset_row),
                    xoff=0, yoff=row_index)

        # Remove the mask aoi if necessary
        mask_band = None
        gdal.Dataset.__swig_destroy__(mask_dataset)
        mask_dataset = None
        os.remove(mask_uri)

        # Close and clean up datasource
        aoi_layer = None
        ogr.DataSource.__swig_destroy__(aoi_datasource)
        aoi_datasource = None

        # Clean up datasets
        out_band_list = None
        for dataset in out_dataset_list:
            dataset.FlushCache()
            gdal.Dataset.__swig_destroy__(dataset)
        out_dataset_list = None
def assert_datasets_in_same_projection(dataset_uri_list):
    """Assert that provided datasets are all in the same projection.

    Tests if datasets represented by their uris are projected and in
    the same projection.  The branches that would flag unprojected or
    mismatched datasets are currently no-ops.

    Args:
        dataset_uri_list (list): list of dataset uris to check.

    Returns:
        is_true (boolean): True
    """
    open_rasters = [gdal.Open(uri) for uri in dataset_uri_list]

    projection_pairs = []
    unprojected_paths = set()
    for raster in open_rasters:
        spatial_ref = osr.SpatialReference()
        spatial_ref.ImportFromWkt(raster.GetProjection())
        raster_path = raster.GetFileList()[0]
        if not spatial_ref.IsProjected():
            unprojected_paths.add(raster_path)
        projection_pairs.append((spatial_ref, raster_path))

    if unprojected_paths:
        # placeholder: unprojected datasets detected but not acted upon
        pass

    # compare each projection against the next one in sequence
    for (sr_a, _), (sr_b, _) in zip(projection_pairs, projection_pairs[1:]):
        if not sr_a.IsSame(sr_b):
            # placeholder: differing projections detected but not acted upon
            pass

    # Close and clean up each dataset
    for raster in open_rasters:
        gdal.Dataset.__swig_destroy__(raster)
    open_rasters = None

    return True
def resize_and_resample_dataset_uri(
        original_dataset_uri, bounding_box, out_pixel_size, output_uri,
        resample_method):
    """Resize and resample the given dataset.

    Args:
        original_dataset_uri (string): a GDAL dataset
        bounding_box (list): [upper_left_x, upper_left_y, lower_right_x,
            lower_right_y]
        out_pixel_size: the pixel size in projected linear units
        output_uri (string): the location of the new resampled GDAL dataset
        resample_method (string): the resampling technique, one of
            "nearest|bilinear|cubic|cubic_spline|lanczos"

    Returns:
        None
    """
    # map the user-facing method names onto GDAL's resampling enum
    resample_dict = {
        "nearest": gdal.GRA_NearestNeighbour,
        "bilinear": gdal.GRA_Bilinear,
        "cubic": gdal.GRA_Cubic,
        "cubic_spline": gdal.GRA_CubicSpline,
        "lanczos": gdal.GRA_Lanczos
    }

    original_dataset = gdal.Open(original_dataset_uri)
    original_band = original_dataset.GetRasterBand(1)
    original_nodata = original_band.GetNoDataValue()
    if original_nodata is None:
        # fall back to a sentinel nodata when the source band defines none
        original_nodata = -9999
    original_sr = osr.SpatialReference()
    original_sr.ImportFromWkt(original_dataset.GetProjection())

    # north-up geotransform anchored at the bounding box's upper left
    output_geo_transform = [
        bounding_box[0], out_pixel_size, 0.0, bounding_box[1], 0.0,
        -out_pixel_size]
    new_x_size = abs(
        int(numpy.round((bounding_box[2] - bounding_box[0]) / out_pixel_size)))
    new_y_size = abs(
        int(numpy.round((bounding_box[3] - bounding_box[1]) / out_pixel_size)))

    # clamp to at least one pixel so GDAL can create the raster
    if new_x_size == 0:
        new_x_size = 1
    if new_y_size == 0:
        new_y_size = 1

    # create the new x and y size
    block_size = original_band.GetBlockSize()
    # If the original band is tiled, then its x blocksize will be different
    # than the number of columns
    if original_band.XSize > 256 and original_band.YSize > 256:
        # it makes sense for many functions to have 256x256 blocks
        block_size[0] = 256
        block_size[1] = 256
        gtiff_creation_options = [
            'TILED=YES', 'BIGTIFF=IF_SAFER', 'BLOCKXSIZE=%d' % block_size[0],
            'BLOCKYSIZE=%d' % block_size[1]]

        # carry over signed/unsigned byte interpretation if present
        metadata = original_band.GetMetadata('IMAGE_STRUCTURE')
        if 'PIXELTYPE' in metadata:
            gtiff_creation_options.append('PIXELTYPE=' + metadata['PIXELTYPE'])
    else:
        # it is so small or strangely aligned, use the default creation options
        gtiff_creation_options = []

    create_directories([os.path.dirname(output_uri)])
    gdal_driver = gdal.GetDriverByName('GTiff')
    output_dataset = gdal_driver.Create(
        output_uri, new_x_size, new_y_size, 1, original_band.DataType,
        options=gtiff_creation_options)
    output_band = output_dataset.GetRasterBand(1)

    output_band.SetNoDataValue(original_nodata)

    # Set the geotransform
    output_dataset.SetGeoTransform(output_geo_transform)
    output_dataset.SetProjection(original_sr.ExportToWkt())

    # need to make this a closure so we get the current time and we can affect
    # state
    def reproject_callback(df_complete, psz_message, p_progress_arg):
        """The argument names come from the GDAL API for callbacks.

        State lives in attributes on the function object itself; the
        AttributeError branch initializes them on the first call.  The
        body only updates timing state (progress logging appears to have
        been stripped).
        """
        try:
            current_time = time.time()
            if ((current_time - reproject_callback.last_time) > 5.0 or
                    (df_complete == 1.0 and
                     reproject_callback.total_time >= 5.0)):
                reproject_callback.last_time = current_time
                reproject_callback.total_time += current_time
        except AttributeError:
            reproject_callback.last_time = time.time()
            reproject_callback.total_time = 0.0

    # Perform the projection/resampling; note source and destination share
    # the same projection here (resize/resample only, no reprojection)
    gdal.ReprojectImage(
        original_dataset, output_dataset, original_sr.ExportToWkt(),
        original_sr.ExportToWkt(), resample_dict[resample_method], 0, 0,
        reproject_callback, [output_uri])

    # Make sure the dataset is closed and cleaned up
    original_band = None
    gdal.Dataset.__swig_destroy__(original_dataset)
    original_dataset = None

    output_dataset.FlushCache()
    gdal.Dataset.__swig_destroy__(output_dataset)
    output_dataset = None
    calculate_raster_stats_uri(output_uri)
def create_directories(directory_list):
    """Make directories provided in list of path strings.

    This function will create any of the directories in the directory list
    if possible and raise exceptions if something exception other than
    the directory previously existing occurs.

    Args:
        directory_list (list): a list of string uri paths

    Returns:
        None
    """
    for directory_path in directory_list:
        try:
            os.makedirs(directory_path)
        except OSError as os_error:
            # Tolerate an already-existing directory (EEXIST) and the
            # historical ENOENT case; anything else is a real failure.
            if os_error.errno not in (errno.EEXIST, errno.ENOENT):
                raise
def get_datasource_bounding_box(datasource_uri):
    """Get datasource bounding box where coordinates are in projected units.

    Args:
        datasource_uri (string): a uri to an OGR datasource

    Returns:
        bounding_box (list):
            [upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
            projected coordinates
    """
    datasource = ogr.Open(datasource_uri)
    layer = datasource.GetLayer(0)
    # OGR extent order is (minx, maxx, miny, maxy)
    extent = layer.GetExtent()
    # Reindex datasource extents into the upper left/lower right coordinates
    bounding_box = [extent[0],
                    extent[3],
                    extent[1],
                    extent[2]]
    # bug fix: the original returned the undefined name `bounding_boxz`,
    # which raised NameError on every call
    return bounding_box
def iterblocks(
        raster_path, band_index_list=None, largest_block=_LARGEST_ITERBLOCK,
        astype=None, offset_only=False):
    """Iterate across all the memory blocks in the input raster.

    Result is a generator of block location information and numpy arrays.

    This is especially useful when a single value needs to be derived from
    the pixel values in a raster, such as the sum total of all pixel values,
    or a sequence of unique raster values.  In such cases, `raster_local_op`
    is overkill, since it writes out a raster.

    As a generator, this can be combined multiple times with
    itertools.izip() to iterate 'simultaneously' over multiple rasters,
    though the user should be careful to do so only with prealigned rasters.

    Parameters:
        raster_path (string): Path to raster file to iterate over.
        band_index_list (list of ints or None): A list of the bands for
            which the matrices should be returned.  Defaults to None, which
            will return all bands.  Bands may be specified in any order, and
            band indexes may be specified multiple times.  The blocks
            returned on each iteration will be in the order specified in
            this list.
        largest_block (int): Attempts to iterate over raster blocks with
            this many elements.  Useful in cases where the blocksize is
            relatively small, memory is available, and the function call
            overhead dominates the iteration.  Defaults to 2**20.  A value
            of anything less than the original blocksize of the raster will
            result in blocksizes equal to the original size.
        astype (list of numpy types): If none, output blocks are in the
            native type of the raster bands.  Otherwise this parameter is a
            list of len(band_index_list) length that contains the desired
            output types that iterblock generates for each band.
        offset_only (boolean): defaults to False, if True `iterblocks` only
            returns offset dictionary and doesn't read any binary data from
            the raster.  This can be useful when iterating over writing to
            an output.

    Returns:
        If `offset_only` is false, on each iteration, a tuple containing a
        dict of block data and `n` 2-dimensional numpy arrays are returned,
        where `n` is the number of bands requested via `band_list`.  The
        dict of block data has these attributes:
            data['xoff'] - The X offset of the upper-left-hand corner of the
                block.
            data['yoff'] - The Y offset of the upper-left-hand corner of the
                block.
            data['win_xsize'] - The width of the block.
            data['win_ysize'] - The height of the block.
        If `offset_only` is True, the function returns only the block offset
        data and does not attempt to read binary data from the raster.
    """
    raster = gdal.OpenEx(raster_path)

    if band_index_list is None:
        band_index_list = range(1, raster.RasterCount + 1)
    band_index_list = [
        raster.GetRasterBand(index) for index in band_index_list]

    block = band_index_list[0].GetBlockSize()
    cols_per_block = block[0]
    rows_per_block = block[1]

    n_cols = raster.RasterXSize
    n_rows = raster.RasterYSize

    block_area = cols_per_block * rows_per_block
    # try to make block wider.  bug fix: use floor division (`//`) -- the
    # original used `/`, which under Python 3 produces floats, making the
    # block dimensions/offsets floats and breaking numpy.zeros and
    # ReadAsArray below
    if largest_block // block_area > 0:
        width_factor = largest_block // block_area
        cols_per_block *= width_factor
        if cols_per_block > n_cols:
            cols_per_block = n_cols
        block_area = cols_per_block * rows_per_block
    # try to make block taller
    if largest_block // block_area > 0:
        height_factor = largest_block // block_area
        rows_per_block *= height_factor
        if rows_per_block > n_rows:
            rows_per_block = n_rows

    n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
    n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))

    # Initialize to None so a block array is created on the first iteration
    last_row_block_width = None
    last_col_block_width = None

    if astype is not None:
        block_type_list = [astype] * len(band_index_list)
    else:
        block_type_list = [
            _gdal_to_numpy_type(ds_band) for ds_band in band_index_list]

    for row_block_index in range(n_row_blocks):
        row_offset = row_block_index * rows_per_block
        # the last block in each dimension may be ragged
        row_block_width = n_rows - row_offset
        if row_block_width > rows_per_block:
            row_block_width = rows_per_block

        for col_block_index in range(n_col_blocks):
            col_offset = col_block_index * cols_per_block
            col_block_width = n_cols - col_offset
            if col_block_width > cols_per_block:
                col_block_width = cols_per_block

            # resize the raster block cache if necessary
            if (last_row_block_width != row_block_width or
                    last_col_block_width != col_block_width):
                raster_blocks = [
                    numpy.zeros(
                        (row_block_width, col_block_width),
                        dtype=block_type) for block_type in
                    block_type_list]

            offset_dict = {
                'xoff': col_offset,
                'yoff': row_offset,
                'win_xsize': col_block_width,
                'win_ysize': row_block_width,
            }
            result = offset_dict
            if not offset_only:
                for ds_band, block in zip(band_index_list, raster_blocks):
                    ds_band.ReadAsArray(buf_obj=block, **offset_dict)
                result = (result,) + tuple(raster_blocks)
            yield result
def get_vector_info(vector_path, layer_index=0):
    """Get information about an OGR vector (datasource).

    Parameters:
        vector_path (str): a path to a OGR vector.
        layer_index (int): index of underlying layer to analyze.  Defaults
            to 0.

    Returns:
        raster_properties (dictionary): a dictionary with the following
            properties stored under relevant keys.

            'projection' (string): projection of the vector in Well Known
                Text.
            'bounding_box' (list): list of floats representing the bounding
                box in projected coordinates as [minx, miny, maxx, maxy].
    """
    vector = gdal.OpenEx(vector_path)
    layer = vector.GetLayer(iLayer=layer_index)
    # projection is same for all layers, so just use the first one
    vector_properties = {
        'projection': layer.GetSpatialRef().ExportToWkt(),
    }
    # OGR reports extents as (minx, maxx, miny, maxy); unpack and reorder
    # into [minx, miny, maxx, maxy]
    min_x, max_x, min_y, max_y = layer.GetExtent()
    layer = None
    vector = None
    vector_properties['bounding_box'] = [min_x, min_y, max_x, max_y]
    return vector_properties
def _merge_bounding_boxes(bb1, bb2, mode):
"""Merge two bounding boxes through union or intersection.
Parameters:
bb1, bb2 (list): list of float representing bounding box in the
form bb=[minx,miny,maxx,maxy]
mode (string); one of 'union' or 'intersection'
Returns:
Reduced bounding box of bb1/bb2 depending on mode.
"""
if mode == "union":
comparison_ops = [
_less_than_or_equal, _less_than_or_equal,
_greater_than, _greater_than]
if mode == "intersection":
comparison_ops = [
_greater_than, _greater_than,
_less_than_or_equal, _less_than_or_equal]
bb_out = [op(x, y) for op, x, y in zip(comparison_ops, bb1, bb2)]
return bb_out
def _invoke_timed_callback(
reference_time, callback_lambda, callback_period):
"""Invoke callback if a certain amount of time has passed.
This is a convenience function to standardize update callbacks from the
module.
Parameters:
reference_time (float): time to base `callback_period` length from.
callback_lambda (lambda): function to invoke if difference between
current time and `reference_time` has exceeded `callback_period`.
callback_period (float): time in seconds to pass until
`callback_lambda` is invoked.
Returns:
`reference_time` if `callback_lambda` not invoked, otherwise the time
when `callback_lambda` was invoked.
"""
current_time = time.time()
if current_time - reference_time > callback_period:
callback_lambda()
return current_time
return reference_time
def align_and_resize_raster_stack(
        base_raster_path_list, target_raster_path_list, resample_method_list,
        target_pixel_size, bounding_box_mode, base_vector_path_list=None,
        raster_align_index=None,
        gtiff_creation_options=_DEFAULT_GTIFF_CREATION_OPTIONS):
    """Generate rasters from a base such that they align geospatially.

    This function resizes base rasters that are in the same geospatial
    projection such that the result is an aligned stack of rasters that
    have the same cell size, dimensions, and bounding box.  This is
    achieved by clipping or resizing the rasters to intersected, unioned,
    or equivocated bounding boxes of all the raster and vector input.

    Parameters:
        base_raster_path_list (list): a list of base raster paths that will
            be transformed and will be used to determine the target
            bounding box.
        target_raster_path_list (list): a list of raster paths that will be
            created to one-to-one map with `base_raster_path_list` as
            aligned versions of those original rasters.
        resample_method_list (list): a list of resampling methods which
            one to one map each path in `base_raster_path_list` during
            resizing.  Each element must be one of
            "nearest|bilinear|cubic|cubic_spline|lanczos|mode".
        target_pixel_size (tuple): the target raster's x and y pixel size
            example: [30, -30].
        bounding_box_mode (string): one of "union", "intersection", or
            a list of floats of the form [minx, miny, maxx, maxy].
            Depending on the value, output extents are defined as the
            union, intersection, or the explicit bounding box.
        base_vector_path_list (list): a list of base vector paths whose
            bounding boxes will be used to determine the final bounding box
            of the raster stack if mode is 'union' or 'intersection'.  If
            mode is 'bb=[...]' then these vectors are not used in any
            calculation.
        raster_align_index (int): indicates the index of a raster in
            `base_raster_path_list` that the target rasters' bounding boxes
            pixels should align with.  This feature allows rasters whose
            raster dimensions are the same, but bounding boxes slightly
            shifted less than a pixel size to align with a desired grid
            layout.  If `None` then the bounding box of the target rasters
            is calculated as the precise intersection, union, or bounding
            box.
        gtiff_creation_options (list): list of strings that will be passed
            as GDAL "dataset" creation options to the GTIFF driver, or
            ignored if None.

    Returns:
        None
    """
    # bug fix: ensure `functools` itself is bound -- the original did
    # `from functools import reduce` mid-function but then referenced
    # `functools.partial`, which is a NameError unless the module happens
    # to be imported at file level
    import functools

    last_time = time.time()

    # make sure that the input lists are of the same length
    list_lengths = [
        len(base_raster_path_list), len(target_raster_path_list),
        len(resample_method_list)]
    if len(set(list_lengths)) != 1:
        raise ValueError(
            "base_raster_path_list, target_raster_path_list, and "
            "resample_method_list must be the same length "
            " current lengths are %s" % (str(list_lengths)))

    # we can accept 'union', 'intersection', or a 4 element list/tuple
    if bounding_box_mode not in ["union", "intersection"] and (
            not isinstance(bounding_box_mode, (list, tuple)) or
            len(bounding_box_mode) != 4):
        raise ValueError("Unknown bounding_box_mode %s" % (
            str(bounding_box_mode)))

    if ((raster_align_index is not None) and
            ((raster_align_index < 0) or
             (raster_align_index >= len(base_raster_path_list)))):
        raise ValueError(
            "Alignment index is out of bounds of the datasets index: %s"
            " n_elements %s" % (
                raster_align_index, len(base_raster_path_list)))

    raster_info_list = [
        get_raster_info(path) for path in base_raster_path_list]
    if base_vector_path_list is not None:
        vector_info_list = [
            get_vector_info(path) for path in base_vector_path_list]
    else:
        vector_info_list = []

    # get the literal or intersecting/unioned bounding box
    if isinstance(bounding_box_mode, (list, tuple)):
        target_bounding_box = bounding_box_mode
    else:
        # either intersection or union
        target_bounding_box = functools.reduce(
            functools.partial(_merge_bounding_boxes, mode=bounding_box_mode),
            [info['bounding_box'] for info in
             (raster_info_list + vector_info_list)])

    if bounding_box_mode == "intersection" and (
            target_bounding_box[0] > target_bounding_box[2] or
            target_bounding_box[1] > target_bounding_box[3]):
        raise ValueError("The rasters' and vectors' intersection is empty "
                         "(not all rasters and vectors touch each other).")

    # bug fix: the original tested `raster_align_index >= 0`, which raises
    # TypeError under Python 3 for the documented default of None; negative
    # values are already rejected by the validation above
    if raster_align_index is not None:
        # bounding box needs alignment
        align_bounding_box = (
            raster_info_list[raster_align_index]['bounding_box'])
        align_pixel_size = (
            raster_info_list[raster_align_index]['pixel_size'])
        # adjust bounding box so lower left corner aligns with a pixel in
        # raster[raster_align_index]
        for index in [0, 1]:
            n_pixels = int(
                (target_bounding_box[index] - align_bounding_box[index]) /
                float(align_pixel_size[index]))
            target_bounding_box[index] = (
                n_pixels * align_pixel_size[index] +
                align_bounding_box[index])

    for index, (base_path, target_path, resample_method) in enumerate(zip(
            base_raster_path_list, target_raster_path_list,
            resample_method_list)):
        # periodic progress logging (note: `index` in the lambda is
        # late-bound, evaluated at callback invocation time)
        last_time = _invoke_timed_callback(
            last_time, lambda: LOGGER.info(
                "align_dataset_list aligning dataset %d of %d",
                index, len(base_raster_path_list)), _LOGGING_PERIOD)
        warp_raster(
            base_path, target_pixel_size,
            target_path, resample_method,
            target_bb=target_bounding_box,
            gtiff_creation_options=gtiff_creation_options)
def warp_raster(
        base_raster_path, target_pixel_size, target_raster_path,
        resample_method, target_bb=None, target_sr_wkt=None,
        gtiff_creation_options=_DEFAULT_GTIFF_CREATION_OPTIONS):
    """Resize/resample raster to desired pixel size, bbox and projection.

    Parameters:
        base_raster_path (string): path to base raster.
        target_pixel_size (list): a two element list or tuple indicating
            the x and y pixel size in projected units.
        target_raster_path (string): the location of the resized and
            resampled raster.
        resample_method (string): the resampling technique, one of
            "nearest|bilinear|cubic|cubic_spline|lanczos|mode"
        target_bb (list): if None, target bounding box is the same as the
            source bounding box.  Otherwise it's a list of float describing
            target bounding box in target coordinate system as
            [minx, miny, maxx, maxy].
        target_sr_wkt (string): if not None, desired target projection in
            Well Known Text format.
        gtiff_creation_options (list or tuple): list of strings that will
            be passed as GDAL "dataset" creation options to the GTIFF
            driver.

    Returns:
        None
    """
    base_raster = gdal.OpenEx(base_raster_path)
    base_sr = osr.SpatialReference()
    base_sr.ImportFromWkt(base_raster.GetProjection())

    if target_bb is None:
        target_bb = get_raster_info(base_raster_path)['bounding_box']
        # transform the target_bb if target_sr_wkt is not None
        if target_sr_wkt is not None:
            target_bb = transform_bounding_box(
                get_raster_info(base_raster_path)['bounding_box'],
                get_raster_info(base_raster_path)['projection'],
                target_sr_wkt)

    target_geotransform = [
        target_bb[0], target_pixel_size[0], 0.0, target_bb[1], 0.0,
        target_pixel_size[1]]
    # this handles a case of a negative pixel size in which case the raster
    # row will increase downward
    if target_pixel_size[0] < 0:
        target_geotransform[0] = target_bb[2]
    if target_pixel_size[1] < 0:
        target_geotransform[3] = target_bb[3]
    target_x_size = abs((target_bb[2] - target_bb[0]) / target_pixel_size[0])
    target_y_size = abs((target_bb[3] - target_bb[1]) / target_pixel_size[1])

    # round a fractional pixel count up so the target raster covers the
    # full bounding box
    if target_x_size - int(target_x_size) > 0:
        target_x_size = int(target_x_size) + 1
    else:
        target_x_size = int(target_x_size)
    if target_y_size - int(target_y_size) > 0:
        target_y_size = int(target_y_size) + 1
    else:
        target_y_size = int(target_y_size)

    if target_x_size == 0:
        LOGGER.warn(
            "bounding_box is so small that x dimension rounds to 0; "
            "clamping to 1.")
        target_x_size = 1
    if target_y_size == 0:
        LOGGER.warn(
            "bounding_box is so small that y dimension rounds to 0; "
            "clamping to 1.")
        target_y_size = 1

    local_gtiff_creation_options = list(gtiff_creation_options)
    # PIXELTYPE is sometimes used to define signed vs. unsigned bytes and
    # the only place that is stored is in the IMAGE_STRUCTURE metadata
    # copy it over if it exists; get this info from the first band since
    # all bands have the same datatype
    base_band = base_raster.GetRasterBand(1)
    metadata = base_band.GetMetadata('IMAGE_STRUCTURE')
    if 'PIXELTYPE' in metadata:
        local_gtiff_creation_options.append(
            'PIXELTYPE=' + metadata['PIXELTYPE'])

    # make directory if it doesn't exist
    try:
        os.makedirs(os.path.dirname(target_raster_path))
    except OSError:
        pass
    gdal_driver = gdal.GetDriverByName('GTiff')
    target_raster = gdal_driver.Create(
        target_raster_path, target_x_size, target_y_size,
        base_raster.RasterCount, base_band.DataType,
        options=local_gtiff_creation_options)
    base_band = None

    # copy each band's nodata value (if any) over to the target
    for index in range(target_raster.RasterCount):
        base_nodata = base_raster.GetRasterBand(1+index).GetNoDataValue()
        if base_nodata is not None:
            target_band = target_raster.GetRasterBand(1+index)
            target_band.SetNoDataValue(base_nodata)
            target_band = None

    # Set the geotransform
    target_raster.SetGeoTransform(target_geotransform)
    if target_sr_wkt is None:
        target_sr_wkt = base_sr.ExportToWkt()
    target_raster.SetProjection(target_sr_wkt)

    # need to make this a closure so we get the current time and we can
    # affect state
    reproject_callback = _make_logger_callback(
        "ReprojectImage %.1f%% complete %s, psz_message '%s'")

    # Perform the projection/resampling
    gdal.ReprojectImage(
        base_raster, target_raster, base_sr.ExportToWkt(),
        target_sr_wkt, _RESAMPLE_DICT[resample_method], 0, 0,
        reproject_callback, [target_raster_path])

    # dropping the references closes the datasets and flushes to disk
    target_raster = None
    base_raster = None
    calculate_raster_stats(target_raster_path)
def transform_bounding_box(
        bounding_box, base_ref_wkt, target_ref_wkt, edge_samples=11):
    """Transform input bounding box to output projection.

    This transform accounts for the fact that the reprojected square
    bounding box might be warped in the new coordinate system.  To account
    for this, the function samples points along the original bounding box
    edges and attempts to make the largest bounding box around any
    transformed point on the edge whether corners or warped edges.

    Parameters:
        bounding_box (list): a list of 4 coordinates in `base_epsg`
            coordinate system describing the bound in the order
            [xmin, ymin, xmax, ymax]
        base_ref_wkt (string): the spatial reference of the input
            coordinate system in Well Known Text.
        target_ref_wkt (string): the spatial reference of the desired
            output coordinate system in Well Known Text.
        edge_samples (int): the number of interpolated points along each
            bounding box edge to sample along. A value of 2 will sample
            just the corners while a value of 3 will also sample the
            corners and the midpoint.

    Returns:
        A list of the form [xmin, ymin, xmax, ymax] that describes the
        largest fitting bounding box around the original warped bounding
        box in `new_epsg` coordinate system.
    """
    base_ref = osr.SpatialReference()
    base_ref.ImportFromWkt(base_ref_wkt)
    target_ref = osr.SpatialReference()
    target_ref.ImportFromWkt(target_ref_wkt)
    transformer = osr.CoordinateTransformation(base_ref, target_ref)

    def _project_point(point):
        """Transform an (x, y) tuple from base_ref to target_ref."""
        out_x, out_y, _ = transformer.TransformPoint(*point)
        return (out_x, out_y)

    # Corner points are numbered clockwise starting at the upper left:
    #   0--3
    #   |  |
    #   1--2
    upper_left = numpy.array((bounding_box[0], bounding_box[3]))
    lower_left = numpy.array((bounding_box[0], bounding_box[1]))
    lower_right = numpy.array((bounding_box[2], bounding_box[1]))
    upper_right = numpy.array((bounding_box[2], bounding_box[3]))

    # Each edge is interpolated into `edge_samples` points, every point is
    # projected into the target reference, and the appropriate extreme
    # coordinate (min x for the left edge, min y for the bottom, max x for
    # the right, max y for the top) is kept so warped edges are covered.
    edge_specs = [
        (upper_left, lower_left, lambda pts: min(p[0] for p in pts)),
        (lower_left, lower_right, lambda pts: min(p[1] for p in pts)),
        (lower_right, upper_right, lambda pts: max(p[0] for p in pts)),
        (upper_right, upper_left, lambda pts: max(p[1] for p in pts)),
    ]
    transformed_bounding_box = []
    for edge_start, edge_end, reduce_fn in edge_specs:
        projected_samples = [
            _project_point(edge_start * t + edge_end * (1 - t))
            for t in numpy.linspace(0, 1, edge_samples)]
        transformed_bounding_box.append(reduce_fn(projected_samples))
    return transformed_bounding_box
def _make_logger_callback(message):
"""Build a timed logger callback that prints `message` replaced.
Parameters:
message (string): a string that expects 3 placement %% variables,
first for % complete from `df_complete`, second `psz_message`
and last is `p_progress_arg[0]`.
Returns:
Function with signature:
logger_callback(df_complete, psz_message, p_progress_arg)
"""
def logger_callback(df_complete, psz_message, p_progress_arg):
"""The argument names come from the GDAL API for callbacks."""
try:
current_time = time.time()
if ((current_time - logger_callback.last_time) > 5.0 or
(df_complete == 1.0 and
logger_callback.total_time >= 5.0)):
LOGGER.info(
message, df_complete * 100, p_progress_arg[0],
psz_message)
logger_callback.last_time = current_time
logger_callback.total_time += current_time
except AttributeError:
logger_callback.last_time = time.time()
logger_callback.total_time = 0.0
return logger_callback
def _is_raster_path_band_formatted(raster_path_band):
"""Returns true if raster path band is a (str, int) tuple/list."""
if not isinstance(raster_path_band, (list, tuple)):
return False
elif len(raster_path_band) != 2:
return False
elif not isinstance(raster_path_band[0], str):
return False
elif not isinstance(raster_path_band[1], int):
return False
else:
return True
def zonal_statistics(
        base_raster_path_band, aggregate_vector_path,
        aggregate_field_name, aggregate_layer_name=None,
        ignore_nodata=True, all_touched=False, polygons_might_overlap=True,
        working_dir=None):
    """Collect stats on pixel values which lie within polygons.

    This function summarizes raster statistics including min, max, sum and
    pixel count over the regions on the raster that are overlapped by the
    polygons in the vector layer.  This function can handle cases where
    polygons overlap, which is notable since zonal stats functions provided
    by ArcGIS or QGIS usually incorrectly aggregate these areas.  Overlap
    avoidance is achieved by calculating a minimal set of disjoint
    non-overlapping polygons from `aggregate_vector_path` and rasterizing
    each set separately during the raster aggregation phase.  That set of
    rasters are then used to calculate the zonal stats of all polygons
    without aggregating vector overlap.

    Parameters:
        base_raster_path_band (tuple): a str/int tuple indicating the path
            to the base raster and the band index of that raster to
            analyze.
        aggregate_vector_path (string): a path to an ogr compatible polygon
            vector whose geometric features indicate the areas over
            `base_raster_path_band` to calculate statistics over.
        aggregate_field_name (string): field name in
            `aggregate_vector_path` that represents an identifying value
            for a given polygon.  Result of this function will be indexed
            by the values found in this field.
        aggregate_layer_name (string): name of shapefile layer that will be
            used to aggregate results over.  If set to None, the first
            layer in the DataSource will be used as retrieved by
            `.GetLayer()`.  Note: it is normal and expected to set this
            field at None if the aggregating shapefile is a single layer as
            many shapefiles, including the common 'ESRI Shapefile', are.
        ignore_nodata: if true, then nodata pixels are not accounted for
            when calculating min, max, count, or sum.  However, the value
            of `nodata_count` will always be the number of nodata pixels
            aggregated under the polygon.
        all_touched (boolean): if true will account for any pixel whose
            geometry passes through the pixel, not just the center point.
        polygons_might_overlap (boolean): if True the function calculates
            aggregation coverage close to optimally by rasterizing sets of
            polygons that don't overlap.  However, this step can be
            computationally expensive for cases where there are many
            polygons.  Setting this flag to False directs the function to
            rasterize in one step.
        working_dir (string): If not None, indicates where temporary files
            should be created during this run.

    Returns:
        nested dictionary indexed by aggregating feature id, and then by
        one of 'min' 'max' 'sum' 'count' and 'nodata_count' (mean can be
        derived as sum/count).  Example:
        {0: {'min': 0, 'max': 1, 'sum': 1.7, 'count': 3, 'nodata_count': 1}}
    """
    import uuid
    import shutil
    if not _is_raster_path_band_formatted(base_raster_path_band):
        # Bug fix: the original concatenated the tuple onto the message
        # with `+`, which raised TypeError instead of the intended
        # ValueError; use %-formatting.
        raise ValueError(
            "`base_raster_path_band` not formatted as expected. Expects "
            "(path, band_index), received %s" % (base_raster_path_band,))
    aggregate_vector = gdal.OpenEx(aggregate_vector_path)
    if aggregate_layer_name is not None:
        aggregate_layer = aggregate_vector.GetLayerByName(
            aggregate_layer_name)
    else:
        aggregate_layer = aggregate_vector.GetLayer()
    aggregate_layer_defn = aggregate_layer.GetLayerDefn()
    aggregate_field_index = aggregate_layer_defn.GetFieldIndex(
        aggregate_field_name)
    if aggregate_field_index == -1:  # -1 returned when field does not exist.
        # Raise exception if user provided a field that's not in vector
        raise ValueError(
            'Vector %s must have a field named %s' %
            (aggregate_vector_path, aggregate_field_name))
    # create a new aggregate ID field to map base vector aggregate fields
    # to local ones that are guaranteed to be integers.
    local_aggregate_field_name = str(uuid.uuid4())[-8:-1]
    local_aggregate_field_def = ogr.FieldDefn(
        local_aggregate_field_name, ogr.OFTInteger)
    # Adding the rasterize by attribute option
    rasterize_layer_args = {
        'options': [
            'ALL_TOUCHED=%s' % str(all_touched).upper(),
            'ATTRIBUTE=%s' % local_aggregate_field_name]
    }
    # clip base raster to aggregating vector intersection
    raster_info = get_raster_info(base_raster_path_band[0])
    # Bug fix: the original hard-coded `raster_nodata = None`, so nodata
    # pixels were never detected and `nodata_count` was always 0.  Read
    # the band's nodata value (-1 because band indexes are 1 based).
    raster_nodata = raster_info['nodata'][base_raster_path_band[1] - 1]
    with tempfile.NamedTemporaryFile(
            prefix='clipped_raster', delete=False,
            dir=working_dir) as clipped_raster_file:
        clipped_raster_path = clipped_raster_file.name
    align_and_resize_raster_stack(
        [base_raster_path_band[0]], [clipped_raster_path], ['nearest'],
        raster_info['pixel_size'], 'intersection',
        base_vector_path_list=[aggregate_vector_path], raster_align_index=0)
    clipped_raster = gdal.OpenEx(clipped_raster_path)
    # make a shapefile that non-overlapping layers can be added to
    driver = ogr.GetDriverByName('ESRI Shapefile')
    disjoint_vector_dir = tempfile.mkdtemp(dir=working_dir)
    disjoint_vector = driver.CreateDataSource(
        os.path.join(disjoint_vector_dir, 'disjoint_vector.shp'))
    spat_ref = aggregate_layer.GetSpatialRef()
    # Map each base aggregate field value to a unique local integer id so
    # the values can be rasterized even if the source field is not numeric.
    base_to_local_aggregate_value = {}
    for feature in aggregate_layer:
        aggregate_field_value = feature.GetField(aggregate_field_name)
        if aggregate_field_value not in base_to_local_aggregate_value:
            base_to_local_aggregate_value[aggregate_field_value] = len(
                base_to_local_aggregate_value)
    aggregate_layer.ResetReading()
    # Loop over each polygon and aggregate
    if polygons_might_overlap:
        minimal_polygon_sets = calculate_disjoint_polygon_set(
            aggregate_vector_path)
    else:
        minimal_polygon_sets = [
            set([feat.GetFID() for feat in aggregate_layer])]
    clipped_band = clipped_raster.GetRasterBand(base_raster_path_band[1])
    with tempfile.NamedTemporaryFile(
            prefix='aggregate_id_raster',
            delete=False, dir=working_dir) as aggregate_id_raster_file:
        aggregate_id_raster_path = aggregate_id_raster_file.name
    # the nodata id is one past the largest assigned local id
    aggregate_id_nodata = len(base_to_local_aggregate_value)
    new_raster_from_base(
        clipped_raster_path, aggregate_id_raster_path, gdal.GDT_Int32,
        [aggregate_id_nodata])
    aggregate_id_raster = gdal.OpenEx(aggregate_id_raster_path, gdal.GA_Update)
    aggregate_stats = {}
    for polygon_set in minimal_polygon_sets:
        disjoint_layer = disjoint_vector.CreateLayer(
            'disjoint_vector', spat_ref, ogr.wkbPolygon)
        disjoint_layer.CreateField(local_aggregate_field_def)
        # add polygons to subset_layer
        for index, poly_fid in enumerate(polygon_set):
            poly_feat = aggregate_layer.GetFeature(poly_fid)
            disjoint_layer.CreateFeature(poly_feat)
            # we seem to need to reload the feature and set the index
            # because just copying over the feature left indexes as all 0s.
            # Not sure why.
            new_feat = disjoint_layer.GetFeature(index)
            new_feat.SetField(
                local_aggregate_field_name, base_to_local_aggregate_value[
                    poly_feat.GetField(aggregate_field_name)])
            disjoint_layer.SetFeature(new_feat)
        disjoint_layer.SyncToDisk()
        # nodata out the mask
        aggregate_id_band = aggregate_id_raster.GetRasterBand(1)
        aggregate_id_band.Fill(aggregate_id_nodata)
        aggregate_id_band = None
        gdal.RasterizeLayer(
            aggregate_id_raster, [1], disjoint_layer, **rasterize_layer_args)
        aggregate_id_raster.FlushCache()
        # Delete the features we just added to the subset_layer
        disjoint_layer = None
        disjoint_vector.DeleteLayer(0)
        # walk the aggregate id raster block-wise, accumulating
        # min/max/count/sum statistics per aggregate id
        for aggregate_id_offsets, aggregate_id_block in iterblocks(
                aggregate_id_raster_path):
            clipped_block = clipped_band.ReadAsArray(**aggregate_id_offsets)
            # guard against a None nodata type
            valid_mask = numpy.ones(aggregate_id_block.shape, dtype=bool)
            if aggregate_id_nodata is not None:
                valid_mask[:] = aggregate_id_block != aggregate_id_nodata
            valid_aggregate_id = aggregate_id_block[valid_mask]
            valid_clipped = clipped_block[valid_mask]
            for aggregate_id in numpy.unique(valid_aggregate_id):
                aggregate_mask = valid_aggregate_id == aggregate_id
                masked_clipped_block = valid_clipped[aggregate_mask]
                if raster_nodata is not None:
                    clipped_nodata_mask = (
                        masked_clipped_block == raster_nodata)
                else:
                    # no nodata defined on the band: nothing is nodata
                    clipped_nodata_mask = numpy.zeros(
                        masked_clipped_block.shape, dtype=bool)
                if aggregate_id not in aggregate_stats:
                    aggregate_stats[aggregate_id] = {
                        'min': None,
                        'max': None,
                        'count': 0,
                        'nodata_count': 0,
                        'sum': 0.0
                    }
                aggregate_stats[aggregate_id]['nodata_count'] += (
                    numpy.count_nonzero(clipped_nodata_mask))
                if ignore_nodata:
                    masked_clipped_block = (
                        masked_clipped_block[~clipped_nodata_mask])
                if masked_clipped_block.size == 0:
                    continue
                if aggregate_stats[aggregate_id]['min'] is None:
                    # initialize first min/max from the first valid pixel
                    aggregate_stats[aggregate_id]['min'] = (
                        masked_clipped_block[0])
                    aggregate_stats[aggregate_id]['max'] = (
                        masked_clipped_block[0])
                aggregate_stats[aggregate_id]['min'] = min(
                    numpy.min(masked_clipped_block),
                    aggregate_stats[aggregate_id]['min'])
                aggregate_stats[aggregate_id]['max'] = max(
                    numpy.max(masked_clipped_block),
                    aggregate_stats[aggregate_id]['max'])
                aggregate_stats[aggregate_id]['count'] += (
                    masked_clipped_block.size)
                aggregate_stats[aggregate_id]['sum'] += numpy.sum(
                    masked_clipped_block)
    # clean up temporary files
    clipped_band = None
    clipped_raster = None
    aggregate_id_raster = None
    disjoint_layer = None
    disjoint_vector = None
    for filename in [aggregate_id_raster_path, clipped_raster_path]:
        os.remove(filename)
    shutil.rmtree(disjoint_vector_dir)
    # map the local ids back to the original base values.  Py2/3 fix:
    # `iteritems` does not exist on Python 3 dicts; `items` works on both.
    local_to_base_aggregate_value = {
        value: key for key, value in
        base_to_local_aggregate_value.items()}
    return {
        local_to_base_aggregate_value[key]: value
        for key, value in aggregate_stats.items()}
def calculate_disjoint_polygon_set(vector_path, layer_index=0):
    """Create a list of sets of polygons that don't overlap.

    Determining the minimal number of those sets is an np-complete problem
    so this is an approximation that builds up sets of maximal subsets.

    Parameters:
        vector_path (string): a path to an OGR vector.
        layer_index (int): index of underlying layer in `vector_path` to
            calculate disjoint set. Defaults to 0.

    Returns:
        subset_list (list): list of sets of FIDs from vector_path
    """
    import heapq
    vector = gdal.OpenEx(vector_path)
    vector_layer = vector.GetLayer()
    # load every feature's geometry as a shapely polygon keyed by FID
    poly_intersect_lookup = {}
    for poly_feat in vector_layer:
        poly_wkt = poly_feat.GetGeometryRef().ExportToWkt()
        shapely_polygon = shapely.wkt.loads(poly_wkt)
        poly_wkt = None
        poly_fid = poly_feat.GetFID()
        poly_intersect_lookup[poly_fid] = {
            'poly': shapely_polygon,
            'intersects': set(),
        }
    vector_layer = None
    vector = None
    # record for every polygon the set of polygons it intersects (itself
    # included); prepared geometries speed up repeated intersection tests
    for poly_fid in poly_intersect_lookup:
        polygon = shapely.prepared.prep(
            poly_intersect_lookup[poly_fid]['poly'])
        for intersect_poly_fid in poly_intersect_lookup:
            if intersect_poly_fid == poly_fid or polygon.intersects(
                    poly_intersect_lookup[intersect_poly_fid]['poly']):
                poly_intersect_lookup[poly_fid]['intersects'].add(
                    intersect_poly_fid)
        polygon = None
    # Build maximal subsets
    subset_list = []
    while len(poly_intersect_lookup) > 0:
        # sort polygons by increasing number of intersections
        heap = []
        # Py2/3 fix: `iteritems()` does not exist on Python 3 dicts;
        # `items()` works on both Python 2 and 3.
        for poly_fid, poly_dict in poly_intersect_lookup.items():
            heapq.heappush(
                heap, (len(poly_dict['intersects']), poly_fid, poly_dict))
        # build maximal subset
        maximal_set = set()
        while len(heap) > 0:
            _, poly_fid, poly_dict = heapq.heappop(heap)
            for maxset_fid in maximal_set:
                if maxset_fid in poly_intersect_lookup[poly_fid]['intersects']:
                    # it intersects and can't be part of the maximal subset
                    break
            else:
                # made it through without an intersection, add poly_fid to
                # the maximal set
                maximal_set.add(poly_fid)
                # remove that polygon and update the intersections
                del poly_intersect_lookup[poly_fid]
        # remove all the polygons from intersections once they're computed
        # (Py2/3 fix: `itervalues()` replaced with `values()`)
        for maxset_fid in maximal_set:
            for poly_dict in poly_intersect_lookup.values():
                poly_dict['intersects'].discard(maxset_fid)
        subset_list.append(maximal_set)
    return subset_list
def calculate_raster_stats(raster_path):
    """Calculate and set min, max, stdev, and mean for all bands in raster.

    Makes two block-wise passes per band: the first accumulates min, max,
    sum and valid-pixel count; the second accumulates the squared
    deviations from the mean to derive the (population) standard
    deviation.  The results are written back onto each band with
    `SetStatistics`.

    Parameters:
        raster_path (string): a path to a GDAL raster raster that will be
            modified by having its band statistics set

    Returns:
        None
    """
    raster = gdal.OpenEx(raster_path, gdal.GA_Update)
    raster_properties = get_raster_info(raster_path)
    for band_index in range(raster.RasterCount):
        # running statistics for this band; None min means "no valid
        # pixel seen yet"
        target_min = None
        target_max = None
        target_n = 0
        target_sum = 0.0
        for _, target_block in iterblocks(
                raster_path, band_index_list=[band_index+1]):
            nodata_target = raster_properties['nodata'][band_index]
            # guard against an undefined nodata target
            valid_mask = numpy.ones(target_block.shape, dtype=bool)
            if nodata_target is not None:
                valid_mask[:] = target_block != nodata_target
            valid_block = target_block[valid_mask]
            if valid_block.size == 0:
                continue
            if target_min is None:
                # initialize first min/max
                target_min = target_max = valid_block[0]
            target_sum += numpy.sum(valid_block)
            target_min = min(numpy.min(valid_block), target_min)
            target_max = max(numpy.max(valid_block), target_max)
            target_n += valid_block.size
        if target_min is not None:
            target_mean = target_sum / float(target_n)
            stdev_sum = 0.0
            # Second pass: sum of squared deviations from the mean.
            # NOTE: `nodata_target` is reused from the first pass; it is
            # the same value for every block of this band, and reaching
            # this branch guarantees the first loop ran at least once.
            for _, target_block in iterblocks(
                    raster_path, band_index_list=[band_index+1]):
                # guard against an undefined nodata target
                valid_mask = numpy.ones(target_block.shape, dtype=bool)
                if nodata_target is not None:
                    valid_mask = target_block != nodata_target
                valid_block = target_block[valid_mask]
                stdev_sum += numpy.sum((valid_block - target_mean) ** 2)
            # population standard deviation (divide by n, not n-1)
            target_stddev = (stdev_sum / float(target_n)) ** 0.5
            target_band = raster.GetRasterBand(band_index+1)
            target_band.SetStatistics(
                float(target_min), float(target_max), float(target_mean),
                float(target_stddev))
            target_band = None
        else:
            LOGGER.warn(
                "Stats not calculated for %s band %d since no non-nodata "
                "pixels were found.", raster_path, band_index+1)
    raster = None
import os
import pdb
import time
import errno
import shapely.wkt
import shapely.ops
from shapely import speedups
import shapely.prepared
import subprocess
import logging
import tempfile
import distutils.version
import atexit
import functools
import math
import numpy
import gdal
import gdalconst
from osgeo import gdal
from osgeo import osr
from osgeo import ogr
# Module-level logger; attaching a NullHandler keeps the library silent
# unless the host application configures logging itself.
LOGGER = logging.getLogger('pygeoprocessing.geoprocessing')
LOGGER.addHandler(logging.NullHandler())  # silence logging by default
_LOGGING_PERIOD = 5.0  # min 5.0 seconds per update log message for the module
# Default GeoTIFF creation options: tiled layout, BigTIFF only when the
# file would otherwise exceed 4GB, and lossless LZW compression.
_DEFAULT_GTIFF_CREATION_OPTIONS = (
    'TILED=YES', 'BIGTIFF=IF_SAFER', 'COMPRESS=LZW')
_LARGEST_ITERBLOCK = 2**20  # largest block for iterblocks to read in cells
# A dictionary to map the resampling method input string to the gdal type
_RESAMPLE_DICT = {
    "nearest": gdal.GRA_NearestNeighbour,
    "bilinear": gdal.GRA_Bilinear,
    "cubic": gdal.GRA_Cubic,
    "cubic_spline": gdal.GRA_CubicSpline,
    "lanczos": gdal.GRA_Lanczos,
    'mode': gdal.GRA_Mode,
    'average': gdal.GRA_Average,
    }
# GDAL 2.2.3 added a couple of useful interpolation values.
if (distutils.version.LooseVersion(gdal.__version__)
        >= distutils.version.LooseVersion('2.2.3')):
    _RESAMPLE_DICT.update({
        'max': gdal.GRA_Max,
        'min': gdal.GRA_Min,
        'med': gdal.GRA_Med,
        'q1': gdal.GRA_Q1,
        'q3': gdal.GRA_Q3,
    })
def convert_raster_to_ascii(path_input_raster, path_ascii_output, overwrite=True):
    """Convert the raster at `path_input_raster` to an ASCII grid file.

    Args:
        path_input_raster (string): path to a GDAL-readable input raster.
        path_ascii_output (string): path of the ascii grid file to write.
        overwrite (boolean): if True, delete any existing file at
            `path_ascii_output` before writing.

    Returns:
        None
    """
    if overwrite and os.path.isfile(path_ascii_output):
        os.remove(path_ascii_output)
    # Open existing dataset
    source_dataset = gdal.Open(path_input_raster)
    # 'AAIGrid' is the Arc/Info ASCII grid driver; `gdal_translate
    # --formats` lists all available format drivers.
    ascii_driver = gdal.GetDriverByName('AAIGrid')
    # Output to new format
    copied_dataset = ascii_driver.CreateCopy(
        path_ascii_output, source_dataset, 0)
    # Drop the references so GDAL flushes the output to disk.
    copied_dataset = None
    source_dataset = None
def get_dataset_type(path_ds):
    """Return the driver long name (e.g. 'GeoTIFF') of a dataset.

    Args:
        path_ds: path to a GDAL-readable dataset.

    Returns:
        string: the long name of the dataset's format driver.
    """
    dataset = gdal.Open(path_ds, gdalconst.GA_ReadOnly)
    driver_long_name = dataset.GetDriver().LongName
    dataset = None  # Close dataset
    return driver_long_name
def get_dataset_datatype(path_ds):
    """Return the gdalconst datatype constant of band 1 of a dataset.

    Args:
        path_ds: path to a GDAL-readable dataset.

    Returns:
        a gdalconst.GDT_* constant matching band 1's data type; any
        unrecognized type name falls back to GDT_UInt32 (preserves the
        original if/elif chain's default).
    """
    dataset = gdal.Open(path_ds, gdalconst.GA_ReadOnly)
    band = dataset.GetRasterBand(1)
    bandtype = gdal.GetDataTypeName(band.DataType)  # e.g. 'UInt32'
    dataset = None  # Close dataset
    # Idiom: a lookup table replaces the original if/elif chain; behavior
    # (including the GDT_UInt32 fallback) is unchanged.
    type_lookup = {
        'UInt32': gdalconst.GDT_UInt32,
        'UInt16': gdalconst.GDT_UInt16,
        'Float32': gdalconst.GDT_Float32,
        'Float64': gdalconst.GDT_Float64,
        'Int16': gdalconst.GDT_Int16,
        'Int32': gdalconst.GDT_Int32,
        'Unknown': gdalconst.GDT_Unknown,
    }
    return type_lookup.get(bandtype, gdalconst.GDT_UInt32)
def _gdal_to_numpy_type(band):
    """Return the equivalent numpy datatype for a GDAL raster band type.

    Parameters:
        band: a GDAL raster band; its `DataType` and, for byte bands, its
            'IMAGE_STRUCTURE' metadata determine the result.

    Returns:
        numpy type equivalent of band.DataType.

    Raises:
        ValueError: if band.DataType is not a recognized GDAL type.
    """
    gdal_type_to_numpy_lookup = {
        gdal.GDT_Int16: numpy.int16,
        gdal.GDT_Int32: numpy.int32,
        gdal.GDT_UInt16: numpy.uint16,
        gdal.GDT_UInt32: numpy.uint32,
        gdal.GDT_Float32: numpy.float32,
        gdal.GDT_Float64: numpy.float64
    }
    if band.DataType in gdal_type_to_numpy_lookup:
        return gdal_type_to_numpy_lookup[band.DataType]
    # only class not in the lookup is a Byte but double check.
    if band.DataType != gdal.GDT_Byte:
        raise ValueError("Unknown DataType: %s" % str(band.DataType))
    # GDAL records the signedness of byte data in IMAGE_STRUCTURE metadata.
    metadata = band.GetMetadata('IMAGE_STRUCTURE')
    if 'PIXELTYPE' in metadata and metadata['PIXELTYPE'] == 'SIGNEDBYTE':
        return numpy.int8
    return numpy.uint8
def get_datatype_from_uri(dataset_uri):
    """Return the GDAL datatype for the first band of a gdal dataset.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        datatype: datatype for dataset band 1
    """
    dataset = gdal.Open(dataset_uri)
    datatype = dataset.GetRasterBand(1).DataType
    # Explicitly destroy the swig proxy so the file handle is released.
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return datatype
def get_row_col_from_uri(dataset_uri):
    """Return the number of rows and columns of a dataset.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        tuple: 2-tuple (n_rows, n_cols) from dataset_uri
    """
    dataset = gdal.Open(dataset_uri)
    n_rows = dataset.RasterYSize
    n_cols = dataset.RasterXSize
    # Make sure the dataset is closed and cleaned up.  (An unused `band`
    # local in the original cleanup was removed.)
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return (n_rows, n_cols)
def calculate_raster_stats_uri(dataset_uri):
    """Compute and store min, max, stdev, and mean for all raster bands.

    Args:
        dataset_uri (string): a uri to a GDAL raster dataset that will be
            modified by having its band statistics set

    Returns:
        nothing
    """
    dataset = gdal.Open(dataset_uri, gdal.GA_Update)
    # GDAL bands are 1-indexed; ComputeStatistics(False) forces an exact
    # (non-approximate) computation and stores the result on the band.
    for band_number in range(1, dataset.RasterCount + 1):
        band = dataset.GetRasterBand(band_number)
        band.ComputeStatistics(False)
    # Close and clean up dataset
    band = None
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
def get_statistics_from_uri(dataset_uri):
    """Retrieve the (min, max, mean, stdev) of band 1 of a GDAL dataset.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        statistics: min, max, mean, stddev
    """
    dataset = gdal.Open(dataset_uri)
    first_band = dataset.GetRasterBand(1)
    band_statistics = first_band.GetStatistics(0, 1)
    # Close and clean up dataset
    first_band = None
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return band_statistics
def get_cell_size_from_uri(dataset_uri):
    """Return the cell size of the dataset in meters.

    If the pixels are not square, the width and height are averaged (the
    original docstring claimed an exception was raised; the code below
    actually averages via the AssertionError handler).

    NOTE(review): this function is redefined later in this module with the
    same name and near-identical behavior; at import time the later
    definition wins.

    Args:
        dataset_uri (string): uri to a gdal dataset

    Returns:
        size_meters: cell size of the dataset in meters
    """
    srs = osr.SpatialReference()
    dataset = gdal.Open(dataset_uri)
    if dataset is None:
        raise IOError(
            'File not found or not valid dataset type at: %s' % dataset_uri)
    srs.SetProjection(dataset.GetProjection())
    linear_units = srs.GetLinearUnits()
    geotransform = dataset.GetGeoTransform()
    # take absolute value since sometimes negative widths/heights
    try:
        numpy.testing.assert_approx_equal(
            abs(geotransform[1]), abs(geotransform[5]))
        size_meters = abs(geotransform[1]) * linear_units
    except AssertionError as e:
        # non-square pixels: fall back to the mean of width and height
        size_meters = (
            abs(geotransform[1]) + abs(geotransform[5])) / 2.0 * linear_units
    # Close and clean up dataset
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return size_meters
def get_rat_as_dictionary_uri(dataset_uri):
    """Return the RAT of the first band of the dataset at `dataset_uri`.

    Args:
        dataset_uri: a GDAL dataset uri that has a RAT associated with the
            first band

    Returns:
        value (dictionary): a 2D dictionary where the first key is the
            column name and second is the row number
    """
    dataset = gdal.Open(dataset_uri)
    rat_dict = get_rat_as_dictionary(dataset)
    # Make sure the dataset is closed and cleaned up
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return rat_dict
def get_rat_as_dictionary(dataset):
    """Return the RAT of the first band of dataset as a dictionary.

    Args:
        dataset: a GDAL dataset that has a RAT associated with the first
            band

    Returns:
        rat_dictionary (dictionary): a 2D dictionary where the first key is
            the column name and second is the row number
    """
    # Bug fixes: removed a leftover `pdb.set_trace()` debugging call, and
    # the double `GetDefaultRAT()` call (the original fetched the RAT from
    # the band and then called GetDefaultRAT on the RAT itself).
    rat = dataset.GetRasterBand(1).GetDefaultRAT()
    n_columns = rat.GetColumnCount()
    n_rows = rat.GetRowCount()
    rat_dictionary = {}
    for col_index in range(n_columns):
        # Initialize an empty list to store row data and figure out the
        # type of data stored in that column.
        col_type = rat.GetTypeOfCol(col_index)
        col_name = rat.GetNameOfCol(col_index)
        rat_dictionary[col_name] = []
        # Now burn through all the rows to populate the column
        for row_index in range(n_rows):
            # This handles the known 3 types of gdal RAT fields.
            if col_type == gdal.GFT_Integer:
                value = rat.GetValueAsInt(row_index, col_index)
            elif col_type == gdal.GFT_Real:
                value = rat.GetValueAsDouble(row_index, col_index)
            else:
                # If the type is not int or real, default to a string,
                # I think this is better than testing for a string and
                # raising an exception if not
                value = rat.GetValueAsString(row_index, col_index)
            rat_dictionary[col_name].append(value)
    return rat_dictionary
def get_raster_properties_uri(dataset_uri):
    """Wrapper for get_raster_properties() that takes a dataset URI.

    Args:
        dataset_uri (string): a URI to a GDAL raster dataset

    Returns:
        value (dictionary): a dictionary with the properties stored under
            relevant keys: width (w-e pixel resolution), height (n-s pixel
            resolution), x_size, y_size
    """
    dataset = gdal.Open(dataset_uri)
    properties = get_raster_properties(dataset)
    # Make sure the dataset is closed and cleaned up
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return properties
def get_raster_properties(dataset):
    """Return the width, height, X size, and Y size of a GDAL dataset.

    *This function can be expanded to return more properties if needed*

    Args:
        dataset: a GDAL raster dataset to get the properties from

    Returns:
        dataset_dict (dictionary): properties stored under the keys
            'width' (w-e pixel resolution), 'height' (n-s pixel
            resolution), 'x_size', and 'y_size'
    """
    geo_transform = dataset.GetGeoTransform()
    first_band = dataset.GetRasterBand(1)
    return {
        'width': float(geo_transform[1]),
        'height': float(geo_transform[5]),
        'x_size': first_band.XSize,
        'y_size': first_band.YSize,
    }
def get_nodata_from_uri(dataset_uri):
    """Return the nodata value of band 1 cast to its correct numpy type.

    Args:
        dataset_uri (string): a uri to a gdal dataset

    Returns:
        nodata value for dataset band 1 cast to the band's numpy type, or
        None if the band defines no nodata value.
    """
    dataset = gdal.Open(dataset_uri)
    band = dataset.GetRasterBand(1)
    nodata = band.GetNoDataValue()
    # Cast the raw (float) nodata to the band's numpy dtype so comparisons
    # against pixel arrays are exact.  (Dead `else: pass` removed.)
    if nodata is not None:
        nodata = _gdal_to_numpy_type(band)(nodata)
    band = None
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return nodata
def reclassify(rasterio_rst, reclass_list, output_filename, band=1, creation_options=None):
    """Reclassify a rasterio raster's values given ranges in reclass_list.

    MODIFIED: removed window walking... too slow..

    The reclass list is a simple list of lists with the following
    formatting:
        [[begin_range, end_range, new_value]]
        ie. [ [ 1,3,5 ],[ 3,4,6 ] ]
        * which converts values 1 to 2.99999999 to 5
        and values 3 to 3.99999999 to 6
        all other values stay the same.

    arguments:
        rasterio_rst = raster image instance from rasterio package
        reclass_list = list of reclassification values * see explanation
        band = integer marking which band you want to return from the
            raster, default is 1.
        creation_options = gdal style creation options, but in the rasterio
            implementation * options must be in a dict where the key is the
            name of the gdal -co and the value is the value passed to that
            flag.  i.e.  ["COMPRESS=LZW"] becomes dict([('compress','lzw')])

    Returns:
        an open rasterio dataset for `output_filename`.
    """
    import rasterio
    # None-sentinel replaces the mutable `dict()` default argument.
    if creation_options is None:
        creation_options = dict()
    meta = rasterio_rst.meta
    # Bug fix: the original tested `len(creation_options) < 0`, which is
    # never true, so user creation options were silently ignored.
    if len(creation_options) > 0:
        meta.update(creation_options)
    with rasterio.open(output_filename, mode='w', **meta) as out_rst:
        # this is a gotcha with the .data stuff
        band_arr = rasterio_rst.read_band(band).data
        for rcl in reclass_list:
            # half-open interval [begin, end) mapped to the new value
            band_arr[numpy.logical_and(
                band_arr >= rcl[0], band_arr < rcl[1])] = rcl[2]
        out_rst.write_band(band, band_arr)
    return rasterio.open(output_filename)
def get_cell_size_from_uri(dataset_uri):
    """Get the cell size of a dataset in units of meters.

    If the pixels are not square the width and height are averaged (the
    original docstring claimed an exception was raised; the AssertionError
    handler below actually averages).

    NOTE(review): this is the second definition of `get_cell_size_from_uri`
    in this module; it overrides the earlier one at import time.

    Args:
        dataset_uri (string): uri to a gdal dataset

    Returns:
        size_meters: cell size of the dataset in meters
    """
    srs = osr.SpatialReference()
    dataset = gdal.Open(dataset_uri)
    if dataset is None:
        raise IOError(
            'File not found or not valid dataset type at: %s' % dataset_uri)
    srs.SetProjection(dataset.GetProjection())
    linear_units = srs.GetLinearUnits()
    geotransform = dataset.GetGeoTransform()
    # take absolute value since sometimes negative widths/heights
    try:
        numpy.testing.assert_approx_equal(
            abs(geotransform[1]), abs(geotransform[5]))
        size_meters = abs(geotransform[1]) * linear_units
    except AssertionError as e:
        # non-square pixels: fall back to the mean of width and height
        size_meters = (
            abs(geotransform[1]) + abs(geotransform[5])) / 2.0 * linear_units
    # Close and clean up dataset
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return size_meters
def reclassify_dataset_uri(
        dataset_uri, value_map, raster_out_uri, out_datatype, out_nodata,
        exception_flag='values_required', assert_dataset_projected=True):
    """Reclassify values in a dataset.

    A function to reclassify values in dataset to any output type. By
    default the values except for nodata must be in value_map.

    Args:
        dataset_uri (string): a uri to a gdal dataset
        value_map (dictionary): a dictionary of values of
            {source_value: dest_value, ...} where source_value's type is a
            postive integer type and dest_value is of type out_datatype.
        raster_out_uri (string): the uri for the output raster
        out_datatype (gdal type): the type for the output dataset
        out_nodata (numerical type): the nodata value for the output
            raster.  Must be the same type as out_datatype

    Keyword Args:
        exception_flag (string): either 'none' or 'values_required'.
            If 'values_required' raise an exception if there is a value in
            the raster that is not found in value_map
        assert_dataset_projected (boolean): if True this operation will
            test if the input dataset is not projected and raise an
            exception if so.

    Returns:
        nothing

    Raises:
        Exception: if exception_flag == 'values_required' and the value
            from 'key_raster' is not a key in 'attr_dict'
    """
    if exception_flag not in ['none', 'values_required']:
        # Bug fix: the original passed exception_flag as a second argument
        # to ValueError instead of %-formatting it into the message.
        raise ValueError('unknown exception_flag %s' % exception_flag)
    values_required = exception_flag == 'values_required'
    nodata = get_nodata_from_uri(dataset_uri)
    value_map_copy = value_map.copy()
    # possible that nodata value is not defined, so test for None first
    # otherwise if nodata not predefined, remap it into the dictionary
    if nodata is not None and nodata not in value_map_copy:
        value_map_copy[nodata] = out_nodata
    keys = sorted(value_map_copy.keys())
    values = numpy.array([value_map_copy[x] for x in keys])

    def map_dataset_to_value(original_values):
        """Convert a block of original values to the lookup values."""
        if values_required:
            unique = numpy.unique(original_values)
            has_map = numpy.in1d(unique, keys)
            if not all(has_map):
                raise ValueError(
                    'There was not a value for at least the following codes '
                    '%s for this file %s.\nNodata value is: %s' % (
                        str(unique[~has_map]), dataset_uri, str(nodata)))
        index = numpy.digitize(original_values.ravel(), keys, right=True)
        return values[index].reshape(original_values.shape)

    out_pixel_size = get_cell_size_from_uri(dataset_uri)
    vectorize_datasets(
        [dataset_uri], map_dataset_to_value,
        raster_out_uri, out_datatype, out_nodata, out_pixel_size,
        "intersection", dataset_to_align_index=0,
        vectorize_op=False, assert_datasets_projected=assert_dataset_projected,
        datasets_are_pre_aligned=True)
def clip_dataset_uri(
        source_dataset_uri, aoi_datasource_uri, out_dataset_uri,
        assert_projections=True, process_pool=None, all_touched=False):
    """Clip raster dataset to bounding box of provided vector datasource aoi.

    Clips `source_dataset_uri` to the bounding box of the polygons in
    `aoi_datasource_uri`, writing the source nodata value everywhere
    outside of the AOI.

    Args:
        source_dataset_uri (string): uri to single band GDAL dataset to clip
        aoi_datasource_uri (string): uri to ogr datasource
        out_dataset_uri (string): path on disk for the clipped dataset

    Keyword Args:
        assert_projections (boolean): whether the dataset must be projected
        process_pool: a process pool for multiprocessing
        all_touched (boolean): if True, pass ALL_TOUCHED=TRUE to
            RasterizeLayer when masking the AOI

    Returns:
        None
    """
    dataset = gdal.Open(source_dataset_uri)
    source_band = dataset.GetRasterBand(1)
    nodata_value = source_band.GetNoDataValue()
    band_datatype = source_band.DataType
    # Fall back to a conventional sentinel when the source defines no nodata.
    if nodata_value is None:
        nodata_value = -9999
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None

    mean_pixel_size = get_raster_info(source_dataset_uri)['mean_pixel_size']
    # Identity pixel op; vectorize_datasets handles the AOI masking itself.
    vectorize_datasets(
        [source_dataset_uri], lambda value: value, out_dataset_uri,
        band_datatype, nodata_value, mean_pixel_size, 'intersection',
        aoi_uri=aoi_datasource_uri,
        assert_datasets_projected=assert_projections,
        process_pool=process_pool, vectorize_op=False,
        all_touched=all_touched)
def get_raster_info(raster_path):
    """Get information about a GDAL raster dataset.

    Parameters:
        raster_path (String): a path to a GDAL raster.

    Returns:
        raster_properties (dictionary): a dictionary with the properties
            stored under relevant keys.

            'pixel_size' (tuple): (pixel x-size, pixel y-size) from
                geotransform.
            'mean_pixel_size' (float): the average of the absolute values
                of the two pixel size elements.
            'raster_size' (tuple): number of raster pixels in (x, y)
                direction.
            'nodata' (float or list): the single band's nodata value when
                the raster has one band, otherwise a list of nodata values
                in increasing band index.
            'n_bands' (int): number of bands in the raster.
            'bounding_box' (list): [ulx, uly, lrx, lry] in projected
                coordinates, derived from the geotransform and raster size.
            'geotransform' (tuple): the 6-tuple geotransform of
                (x origin, x-increase, xy-increase,
                 y origin, yx-increase, y-increase).
            'datatype' (int): a gdal.GDT_* enum for the raster's pixel type.
    """
    raster = gdal.Open(raster_path)
    geo_transform = raster.GetGeoTransform()
    first_band = raster.GetRasterBand(1)
    n_cols = first_band.XSize
    n_rows = first_band.YSize
    band_count = raster.RasterCount

    nodata_list = [
        raster.GetRasterBand(band_index).GetNoDataValue()
        for band_index in range(1, band_count + 1)]
    # Single-band rasters report a scalar nodata rather than a list.
    nodata = nodata_list[0] if len(nodata_list) == 1 else nodata_list

    raster_properties = {
        'pixel_size': (geo_transform[1], geo_transform[5]),
        'mean_pixel_size': (
            (abs(geo_transform[1]) + abs(geo_transform[5])) / 2.0),
        'raster_size': (n_cols, n_rows),
        'n_bands': band_count,
        'nodata': nodata,
        'bounding_box': [
            geo_transform[0], geo_transform[3],
            geo_transform[0] + n_cols * geo_transform[1],
            geo_transform[3] + n_rows * geo_transform[5]],
        'geotransform': geo_transform,
        # datatype is uniform across the raster but stored per band in GDAL
        'datatype': first_band.DataType,
    }
    first_band = None
    raster = None
    return raster_properties
def vectorize_datasets(
        dataset_uri_list, dataset_pixel_op, dataset_out_uri, datatype_out,
        nodata_out, pixel_size_out, bounding_box_mode,
        resample_method_list=None, dataset_to_align_index=None,
        dataset_to_bound_index=None, aoi_uri=None,
        assert_datasets_projected=True, process_pool=None, vectorize_op=True,
        datasets_are_pre_aligned=False, dataset_options=None,
        all_touched=False):
    """Apply local raster operation on stack of datasets.

    This function applies a user defined function across a stack of
    datasets.  It has functionality align the output dataset grid
    with one of the input datasets, output a dataset that is the union
    or intersection of the input dataset bounding boxes, and control
    over the interpolation techniques of the input datasets, if
    necessary.  The datasets in dataset_uri_list must be in the same
    projection; the function will raise an exception if not.

    Args:
        dataset_uri_list (list): a list of file uris that point to files that
            can be opened with gdal.Open.
        dataset_pixel_op (function) a function that must take in as many
            arguments as there are elements in dataset_uri_list.  The
            arguments can be treated as interpolated or actual pixel values
            from the input datasets and the function should calculate the
            output value for that pixel stack.  The function is a parallel
            paradigmn and does not know the spatial position of the pixels
            in question at the time of the call.  If the `bounding_box_mode`
            parameter is "union" then the values of input dataset pixels
            that may be outside their original range will be the nodata
            values of those datasets.  Known bug: if dataset_pixel_op does
            not return a value in some cases the output dataset values are
            undefined even if the function does not crash or raise an
            exception.
        dataset_out_uri (string): the uri of the output dataset.  The
            projection will be the same as the datasets in dataset_uri_list.
        datatype_out: the GDAL output type of the output dataset
        nodata_out: the nodata value of the output dataset.
        pixel_size_out: the pixel size of the output dataset in
            projected coordinates.
        bounding_box_mode (string): one of "union" or "intersection",
            "dataset".  If union the output dataset bounding box will be the
            union of the input datasets.  Will be the intersection otherwise.
            An exception is raised if the mode is "intersection" and the
            input datasets have an empty intersection.  If dataset it will
            make a bounding box as large as the given dataset, if given
            dataset_to_bound_index must be defined.

    Keyword Args:
        resample_method_list (list): a list of resampling methods
            for each output uri in dataset_out_uri list.  Each element
            must be one of "nearest|bilinear|cubic|cubic_spline|lanczos".
            If None, the default is "nearest" for all input datasets.
        dataset_to_align_index (int): an int that corresponds to the position
            in one of the dataset_uri_lists that, if positive aligns the
            output rasters to fix on the upper left hand corner of the
            output datasets.  If negative, the bounding box aligns the
            intersection/union without adjustment.
        dataset_to_bound_index: if mode is "dataset" this indicates which
            dataset should be the output size.
        aoi_uri (string): a URI to an OGR datasource to be used for the
            aoi.  Irrespective of the `mode` input, the aoi will be used
            to intersect the final bounding box.
        assert_datasets_projected (boolean): if True this operation will
            test if any datasets are not projected and raise an exception
            if so.
        process_pool: a process pool for multiprocessing
            (NOTE(review): accepted but unused in this implementation —
            confirm whether it is kept only for API compatibility).
        vectorize_op (boolean): if true the model will try to numpy.vectorize
            dataset_pixel_op.  If dataset_pixel_op is designed to use
            maximize array broadcasting, set this parameter to False, else
            it may inefficiently invoke the function on individual elements.
        datasets_are_pre_aligned (boolean): If this value is set to False
            this operation will first align and interpolate the input
            datasets based on the rules provided in bounding_box_mode,
            resample_method_list, dataset_to_align_index, and
            dataset_to_bound_index, if set to True the input dataset list
            must be aligned, probably by raster_utils.align_dataset_list
        dataset_options: this is an argument list that will be
            passed to the GTiff driver.  Useful for blocksizes, compression,
            etc.
        all_touched (boolean): if true the clip uses the option
            ALL_TOUCHED=TRUE when calling RasterizeLayer for AOI masking.

    Returns:
        None

    Raises:
        ValueError: invalid input provided
    """
    if not isinstance(dataset_uri_list, list):
        raise ValueError(
            "dataset_uri_list was not passed in as a list, maybe a single "
            "file was passed in? Here is its value: %s" %
            (str(dataset_uri_list)))
    # Validate all inputs exist on disk before doing any heavy work.
    if aoi_uri is None:
        assert_file_existance(dataset_uri_list)
    else:
        assert_file_existance(dataset_uri_list + [aoi_uri])
    if dataset_out_uri in dataset_uri_list:
        raise ValueError(
            "%s is used as an output file, but it is also an input file "
            "in the input list %s" % (dataset_out_uri, str(dataset_uri_list)))
    valid_bounding_box_modes = ["union", "intersection", "dataset"]
    if bounding_box_mode not in valid_bounding_box_modes:
        raise ValueError(
            "Unknown bounding box mode %s; should be one of %s",
            bounding_box_mode, valid_bounding_box_modes)
    # Create a temporary list of filenames whose files delete on the python
    # interpreter exit
    if not datasets_are_pre_aligned:
        # Handle the cases where optional arguments are passed in
        if resample_method_list is None:
            resample_method_list = ["nearest"] * len(dataset_uri_list)
        if dataset_to_align_index is None:
            dataset_to_align_index = -1
        dataset_out_uri_list = [
            temporary_filename(suffix='.tif') for _ in dataset_uri_list]
        # Align and resample the datasets, then load datasets into a list
        align_dataset_list(
            dataset_uri_list, dataset_out_uri_list, resample_method_list,
            pixel_size_out, bounding_box_mode, dataset_to_align_index,
            dataset_to_bound_index=dataset_to_bound_index,
            aoi_uri=aoi_uri,
            assert_datasets_projected=assert_datasets_projected,
            all_touched=all_touched)
        aligned_datasets = [
            gdal.Open(filename, gdal.GA_ReadOnly) for filename in
            dataset_out_uri_list]
    else:
        # otherwise the input datasets are already aligned
        aligned_datasets = [
            gdal.Open(filename, gdal.GA_ReadOnly) for filename in
            dataset_uri_list]
    aligned_bands = [dataset.GetRasterBand(1) for dataset in aligned_datasets]
    n_rows = aligned_datasets[0].RasterYSize
    n_cols = aligned_datasets[0].RasterXSize
    output_dataset = new_raster_from_base(
        aligned_datasets[0], dataset_out_uri, 'GTiff', nodata_out,
        datatype_out, dataset_options=dataset_options)
    output_band = output_dataset.GetRasterBand(1)
    block_size = output_band.GetBlockSize()
    # makes sense to get the largest block size possible to reduce the number
    # of expensive readasarray calls
    for current_block_size in [band.GetBlockSize() for band in aligned_bands]:
        if (current_block_size[0] * current_block_size[1] >
                block_size[0] * block_size[1]):
            block_size = current_block_size
    cols_per_block, rows_per_block = block_size[0], block_size[1]
    n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
    n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))
    # If there's an AOI, mask it out
    if aoi_uri is not None:
        # Rasterize the AOI polygons into a byte mask aligned with the output;
        # pixels inside the AOI burn to 1, outside remain the fill value 0.
        mask_uri = temporary_filename(suffix='.tif')
        mask_dataset = new_raster_from_base(
            aligned_datasets[0], mask_uri, 'GTiff', 255, gdal.GDT_Byte,
            fill_value=0, dataset_options=dataset_options)
        mask_band = mask_dataset.GetRasterBand(1)
        aoi_datasource = ogr.Open(aoi_uri)
        aoi_layer = aoi_datasource.GetLayer()
        if all_touched:
            option_list = ["ALL_TOUCHED=TRUE"]
        else:
            option_list = []
        gdal.RasterizeLayer(
            mask_dataset, [1], aoi_layer, burn_values=[1], options=option_list)
        aoi_layer = None
        aoi_datasource = None
    # We only want to do this if requested, otherwise we might have a more
    # efficient call if we don't vectorize.
    if vectorize_op:
        dataset_pixel_op = numpy.vectorize(
            dataset_pixel_op, otypes=[_gdal_to_numpy_type(output_band)])
    last_time = time.time()
    last_row_block_width = None
    last_col_block_width = None
    # Iterate block-by-block: read each input band's block, apply the pixel
    # op, apply the AOI mask if present, and write the result.
    for row_block_index in range(n_row_blocks):
        row_offset = row_block_index * rows_per_block
        row_block_width = n_rows - row_offset
        if row_block_width > rows_per_block:
            row_block_width = rows_per_block
        for col_block_index in range(n_col_blocks):
            col_offset = col_block_index * cols_per_block
            col_block_width = n_cols - col_offset
            if col_block_width > cols_per_block:
                col_block_width = cols_per_block
            current_time = time.time()
            # NOTE(review): this timer body only updates last_time; it looks
            # like a leftover stub from removed progress logging — confirm.
            if current_time - last_time > 5.0:
                last_time = current_time
            #This is true at least once since last_* initialized with None
            if (last_row_block_width != row_block_width or
                    last_col_block_width != col_block_width):
                # (Re)allocate read buffers only when the block shape changes
                # (i.e., at the ragged right/bottom edges of the raster).
                dataset_blocks = [
                    numpy.zeros(
                        (row_block_width, col_block_width),
                        dtype=_gdal_to_numpy_type(band)) for band in aligned_bands]
                if aoi_uri != None:
                    mask_array = numpy.zeros(
                        (row_block_width, col_block_width), dtype=numpy.int8)
                last_row_block_width = row_block_width
                last_col_block_width = col_block_width
            for dataset_index in range(len(aligned_bands)):
                aligned_bands[dataset_index].ReadAsArray(
                    xoff=col_offset, yoff=row_offset,
                    win_xsize=col_block_width,
                    win_ysize=row_block_width,
                    buf_obj=dataset_blocks[dataset_index])
            out_block = dataset_pixel_op(*dataset_blocks)
            # Mask out the row if there is a mask
            if aoi_uri is not None:
                mask_band.ReadAsArray(
                    xoff=col_offset, yoff=row_offset,
                    win_xsize=col_block_width,
                    win_ysize=row_block_width,
                    buf_obj=mask_array)
                out_block[mask_array == 0] = nodata_out
            output_band.WriteArray(
                out_block[0:row_block_width, 0:col_block_width],
                xoff=col_offset, yoff=row_offset)
    # Making sure the band and dataset is flushed and not in memory before
    # adding stats
    output_band.FlushCache()
    output_band = None
    output_dataset.FlushCache()
    gdal.Dataset.__swig_destroy__(output_dataset)
    output_dataset = None
    # Clean up the files made by temporary file because we had an issue once
    # where I was running the water yield model over 2000 times and it made
    # so many temporary files I ran out of disk space.
    if aoi_uri is not None:
        mask_band = None
        gdal.Dataset.__swig_destroy__(mask_dataset)
        mask_dataset = None
        os.remove(mask_uri)
    aligned_bands = None
    for dataset in aligned_datasets:
        gdal.Dataset.__swig_destroy__(dataset)
    aligned_datasets = None
    if not datasets_are_pre_aligned:
        # if they weren't pre-aligned then we have temporary files to remove
        for temp_dataset_uri in dataset_out_uri_list:
            try:
                os.remove(temp_dataset_uri)
            except OSError:
                pass
    calculate_raster_stats_uri(dataset_out_uri)
def assert_file_existance(dataset_uri_list):
    """Assert that provided uris exist in filesystem.

    Verify that the uris passed in the argument exist on the filesystem
    if not, raise an exeception indicating which files do not exist

    Args:
        dataset_uri_list (list): a list of relative or absolute file paths
            to validate

    Returns:
        None

    Raises:
        IOError: if any files are not found
    """
    # Collect every path that fails an existence check, preserving order.
    missing = [uri for uri in dataset_uri_list if not os.path.exists(uri)]
    if missing:
        raise IOError(
            "The following files do not exist on the filesystem: " +
            str(missing))
def temporary_filename(suffix=''):
    """Get path to new temporary file that will be deleted on program exit.

    Returns a temporary filename using mkstemp. The file is deleted
    on exit using the atexit register.

    Keyword Args:
        suffix (string): the suffix to be appended to the temporary file

    Returns:
        fname: a unique temporary filename
    """
    handle, fname = tempfile.mkstemp(suffix=suffix)
    # mkstemp opens the file; close the descriptor so callers can reopen it.
    os.close(handle)

    def _cleanup(target_path):
        """Remove target_path at interpreter exit, tolerating its absence."""
        try:
            os.remove(target_path)
        except OSError:
            # Already deleted elsewhere — nothing to do.
            pass

    atexit.register(_cleanup, fname)
    return fname
def new_raster_from_base_uri(base_uri, *args, **kwargs):
    """A wrapper for the function new_raster_from_base that opens up
        the base_uri before passing it to new_raster_from_base.

        Args:
            base_uri (string): a URI to a GDAL dataset on disk.

        All other arguments to new_raster_from_base are passed in.

        Returns:
            None

        Raises:
            IOError: if base_uri cannot be opened as a GDAL raster
    """
    base_raster = gdal.Open(base_uri)
    if base_raster is None:
        # Bug fix: the original raised
        # IOError("%s not found when opening GDAL raster") without
        # interpolating base_uri, printing a literal '%s'.
        raise IOError("%s not found when opening GDAL raster" % base_uri)
    new_raster = new_raster_from_base(base_raster, *args, **kwargs)
    # Destroy the handles immediately; this wrapper only creates the file
    # on disk and intentionally returns nothing.
    gdal.Dataset.__swig_destroy__(new_raster)
    gdal.Dataset.__swig_destroy__(base_raster)
    new_raster = None
    base_raster = None
def new_raster_from_base(
        base, output_uri, gdal_format, nodata, datatype, fill_value=None,
        n_rows=None, n_cols=None, dataset_options=None):
    """Create a new, empty GDAL raster dataset with the spatial references,
    geotranforms of the base GDAL raster dataset.

    Args:
        base: the GDAL raster dataset to base output size and transforms on
        output_uri (string): URI to the new output raster dataset
        gdal_format (string): GDAL file format of the output raster
            (e.g. 'GTiff' or 'MEM'); see http://gdal.org/formats_list.html
        nodata: value set as the nodata value for the output raster; should
            be the same type as 'datatype'
        datatype: pixel datatype of the output raster, e.g. gdal.GDT_Float32

    Keyword Args:
        fill_value: the value to fill in the raster on creation
        n_rows (int): if set, the resulting raster has this many rows;
            otherwise the row count of `base` is used
        n_cols (int): similar to n_rows, but for the columns
        dataset_options (list): dataset options passed to the gdal creation
            driver; overrides defaults

    Returns:
        a new GDAL raster dataset
    """
    # nodata might be a numpy scalar; coerce it to a native python value.
    try:
        nodata = nodata.item()
    except AttributeError:
        pass

    n_rows = base.RasterYSize if n_rows is None else n_rows
    n_cols = base.RasterXSize if n_cols is None else n_cols
    projection = base.GetProjection()
    geotransform = base.GetGeoTransform()
    driver = gdal.GetDriverByName(gdal_format)

    base_band = base.GetRasterBand(1)
    block_size = base_band.GetBlockSize()
    image_structure = base_band.GetMetadata('IMAGE_STRUCTURE')
    base_band = None

    if dataset_options is None:
        # Build a fresh list so a caller-provided list is never aliased.
        dataset_options = []
        # Tile the output unless the base raster is striped
        # (striped rasters have a block width equal to the column count).
        if block_size[0] != n_cols:
            # just use 256x256 blocks
            dataset_options = [
                'TILED=YES',
                'BLOCKXSIZE=256',
                'BLOCKYSIZE=256',
                'BIGTIFF=IF_SAFER']
        if 'PIXELTYPE' in image_structure:
            dataset_options.append(
                'PIXELTYPE=' + image_structure['PIXELTYPE'])

    new_raster = driver.Create(
        output_uri.encode('utf-8'), n_cols, n_rows, 1, datatype,
        options=dataset_options)
    new_raster.SetProjection(projection)
    new_raster.SetGeoTransform(geotransform)

    band = new_raster.GetRasterBand(1)
    if nodata is not None:
        band.SetNoDataValue(nodata)
    # Prefer an explicit fill value; otherwise fill with nodata when defined.
    if fill_value is not None:
        band.Fill(fill_value)
    elif nodata is not None:
        band.Fill(nodata)
    band = None
    return new_raster
def get_bounding_box(dataset_uri):
    """Get bounding box where coordinates are in projected units.

    Args:
        dataset_uri (string): a uri to a GDAL dataset

    Returns:
        bounding_box (list):
            [upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
            projected coordinates
    """
    dataset = gdal.Open(dataset_uri)
    gt = dataset.GetGeoTransform()
    x_size = dataset.RasterXSize
    y_size = dataset.RasterYSize
    # Lower-right corner is origin plus (pixel count * pixel size) per axis.
    bounding_box = [
        gt[0],
        gt[3],
        gt[0] + x_size * gt[1],
        gt[3] + y_size * gt[5],
    ]
    # Close and clean up dataset handle before returning.
    gdal.Dataset.__swig_destroy__(dataset)
    dataset = None
    return bounding_box
def align_dataset_list(
        dataset_uri_list, dataset_out_uri_list, resample_method_list,
        out_pixel_size, mode, dataset_to_align_index,
        dataset_to_bound_index=None, aoi_uri=None,
        assert_datasets_projected=True, all_touched=False):
    """Create a new list of datasets that are aligned based on a list of
        inputted datasets.

    Take a list of dataset uris and generates a new set that is completely
    aligned with identical projections and pixel sizes.

    Args:
        dataset_uri_list (list): a list of input dataset uris
        dataset_out_uri_list (list): a parallel dataset uri list whose
            positions correspond to entries in dataset_uri_list
        resample_method_list (list): a list of resampling methods for each
            output uri in dataset_out_uri list.  Each element must be one of
            "nearest|bilinear|cubic|cubic_spline|lanczos"
        out_pixel_size: the output pixel size
        mode (string): one of "union", "intersection", or "dataset" which
            defines how the output output extents are defined as either the
            union or intersection of the input datasets or to have the same
            bounds as an existing raster.  If mode is "dataset" then
            dataset_to_bound_index must be defined
        dataset_to_align_index (int): an int that corresponds to the position
            in one of the dataset_uri_lists that, if positive aligns the
            output rasters to fix on the upper left hand corner of the
            output datasets.  If negative, the bounding box aligns the
            intersection/union without adjustment.
        all_touched (boolean): if True and an AOI is passed, the
            ALL_TOUCHED=TRUE option is passed to the RasterizeLayer function
            when determining the mask of the AOI.

    Keyword Args:
        dataset_to_bound_index: if mode is "dataset" then this index is
            used to indicate which dataset to define the output bounds of
            the dataset_out_uri_list
        aoi_uri (string): a URI to an OGR datasource to be used for the
            aoi.  Irrespective of the `mode` input, the aoi will be used
            to intersect the final bounding box.

    Returns:
        None
    """
    import functools
    last_time = time.time()
    # make sure that the input lists are of the same length
    list_lengths = [
        len(dataset_uri_list), len(dataset_out_uri_list),
        len(resample_method_list)]
    if not functools.reduce(lambda x, y: x if x == y else False, list_lengths):
        raise Exception(
            "dataset_uri_list, dataset_out_uri_list, and "
            "resample_method_list must be the same length "
            " current lengths are %s" % (str(list_lengths)))
    if assert_datasets_projected:
        assert_datasets_in_same_projection(dataset_uri_list)
    if mode not in ["union", "intersection", "dataset"]:
        raise Exception("Unknown mode %s" % (str(mode)))
    if dataset_to_align_index >= len(dataset_uri_list):
        raise Exception(
            "Alignment index is out of bounds of the datasets index: %s"
            "n_elements %s" % (dataset_to_align_index, len(dataset_uri_list)))
    if mode == "dataset" and dataset_to_bound_index is None:
        raise Exception(
            "Mode is 'dataset' but dataset_to_bound_index is not defined")
    if mode == "dataset" and (dataset_to_bound_index < 0 or
                              dataset_to_bound_index >= len(dataset_uri_list)):
        raise Exception(
            "dataset_to_bound_index is out of bounds of the datasets index: %s"
            "n_elements %s" % (dataset_to_bound_index, len(dataset_uri_list)))

    def merge_bounding_boxes(bb1, bb2, mode):
        """Helper function to merge two bounding boxes through union or
            intersection.

        Boxes are [ulx, uly, lrx, lry]; union widens each edge outward,
        intersection narrows each edge inward.
        """
        less_than_or_equal = lambda x, y: x if x <= y else y
        greater_than = lambda x, y: x if x > y else y
        if mode == "union":
            comparison_ops = [
                less_than_or_equal, greater_than, greater_than,
                less_than_or_equal]
        if mode == "intersection":
            comparison_ops = [
                greater_than, less_than_or_equal, less_than_or_equal,
                greater_than]
        bb_out = [op(x, y) for op, x, y in zip(comparison_ops, bb1, bb2)]
        return bb_out

    # get the intersecting or unioned bounding box
    if mode == "dataset":
        bounding_box = get_bounding_box(
            dataset_uri_list[dataset_to_bound_index])
    else:
        bounding_box = functools.reduce(
            functools.partial(merge_bounding_boxes, mode=mode),
            [get_bounding_box(dataset_uri) for dataset_uri in dataset_uri_list])
    if aoi_uri is not None:
        # the AOI always intersects the final box, regardless of `mode`
        bounding_box = merge_bounding_boxes(
            bounding_box, get_datasource_bounding_box(aoi_uri), "intersection")
    # Empty intersection: upper-left is right of lower-right, or upper-left
    # y is at/below lower-right y (y decreases downward in projected space).
    if (bounding_box[0] >= bounding_box[2] or
            bounding_box[1] <= bounding_box[3]) and mode == "intersection":
        raise Exception("The datasets' intersection is empty "
                        "(i.e., not all the datasets touch each other).")
    if dataset_to_align_index >= 0:
        # bounding box needs alignment: snap the upper-left corner onto the
        # pixel grid of the alignment dataset
        align_bounding_box = get_bounding_box(
            dataset_uri_list[dataset_to_align_index])
        align_pixel_size = get_cell_size_from_uri(
            dataset_uri_list[dataset_to_align_index])
        for index in [0, 1]:
            n_pixels = int(
                (bounding_box[index] - align_bounding_box[index]) /
                float(align_pixel_size))
            bounding_box[index] = \
                n_pixels * align_pixel_size + align_bounding_box[index]
    # NOTE(review): `index` from this zip is unused; the timer body only
    # refreshes last_time — looks like a leftover progress-logging stub.
    for original_dataset_uri, out_dataset_uri, resample_method, index in zip(
            dataset_uri_list, dataset_out_uri_list, resample_method_list,
            range(len(dataset_uri_list))):
        current_time = time.time()
        if current_time - last_time > 5.0:
            last_time = current_time
        resize_and_resample_dataset_uri(
            original_dataset_uri, bounding_box, out_pixel_size,
            out_dataset_uri, resample_method)
    # If there's an AOI, mask it out
    if aoi_uri is not None:
        first_dataset = gdal.Open(dataset_out_uri_list[0])
        n_rows = first_dataset.RasterYSize
        n_cols = first_dataset.RasterXSize
        gdal.Dataset.__swig_destroy__(first_dataset)
        first_dataset = None
        # Rasterize the AOI into a byte mask (1 inside, 0 outside) sized
        # like the first aligned output, then nodata-fill outside pixels.
        mask_uri = temporary_filename(suffix='.tif')
        new_raster_from_base_uri(
            dataset_out_uri_list[0], mask_uri, 'GTiff', 255, gdal.GDT_Byte,
            fill_value=0)
        mask_dataset = gdal.Open(mask_uri, gdal.GA_Update)
        mask_band = mask_dataset.GetRasterBand(1)
        aoi_datasource = ogr.Open(aoi_uri)
        aoi_layer = aoi_datasource.GetLayer()
        if all_touched:
            option_list = ["ALL_TOUCHED=TRUE"]
        else:
            option_list = []
        gdal.RasterizeLayer(
            mask_dataset, [1], aoi_layer, burn_values=[1], options=option_list)
        mask_row = numpy.zeros((1, n_cols), dtype=numpy.int8)
        out_dataset_list = [
            gdal.Open(uri, gdal.GA_Update) for uri in dataset_out_uri_list]
        out_band_list = [
            dataset.GetRasterBand(1) for dataset in out_dataset_list]
        nodata_out_list = [
            get_nodata_from_uri(uri) for uri in dataset_out_uri_list]
        for row_index in range(n_rows):
            mask_row = (mask_band.ReadAsArray(
                0, row_index, n_cols, 1) == 0)
            for out_band, nodata_out in zip(out_band_list, nodata_out_list):
                dataset_row = out_band.ReadAsArray(
                    0, row_index, n_cols, 1)
                out_band.WriteArray(
                    numpy.where(mask_row, nodata_out, dataset_row),
                    xoff=0, yoff=row_index)
        # Remove the mask aoi if necessary
        mask_band = None
        gdal.Dataset.__swig_destroy__(mask_dataset)
        mask_dataset = None
        os.remove(mask_uri)
        # Close and clean up datasource
        aoi_layer = None
        ogr.DataSource.__swig_destroy__(aoi_datasource)
        aoi_datasource = None
        # Clean up datasets
        out_band_list = None
        for dataset in out_dataset_list:
            dataset.FlushCache()
            gdal.Dataset.__swig_destroy__(dataset)
        out_dataset_list = None
def assert_datasets_in_same_projection(dataset_uri_list):
    """Assert that provided datasets are all in the same projection.

    Tests if datasets represented by their uris are projected and in
    the same projection and raises an exception if not.

    NOTE(review): both failure branches below are currently `pass`, so no
    exception is ever raised and the function always returns True despite
    the Raises section — confirm whether these checks were intentionally
    disabled.

    Args:
        dataset_uri_list (list): uris to the GDAL datasets to check

    Returns:
        is_true (boolean): True (otherwise exception raised)

    Raises:
        DatasetUnprojected: if one of the datasets is unprojected.
        DifferentProjections: if at least one of the datasets is in
            a different projection
    """
    dataset_list = [gdal.Open(dataset_uri) for dataset_uri in dataset_uri_list]
    dataset_projections = []
    unprojected_datasets = set()
    for dataset in dataset_list:
        projection_as_str = dataset.GetProjection()
        dataset_sr = osr.SpatialReference()
        dataset_sr.ImportFromWkt(projection_as_str)
        if not dataset_sr.IsProjected():
            unprojected_datasets.add(dataset.GetFileList()[0])
        dataset_projections.append((dataset_sr, dataset.GetFileList()[0]))
    if len(unprojected_datasets) > 0:
        # check disabled: unprojected datasets are silently tolerated
        pass
    for index in range(len(dataset_projections)-1):
        if not dataset_projections[index][0].IsSame(
                dataset_projections[index+1][0]):
            # check disabled: projection mismatches are silently tolerated
            pass
    for dataset in dataset_list:
        # Close and clean up dataset
        gdal.Dataset.__swig_destroy__(dataset)
    dataset_list = None
    return True
def resize_and_resample_dataset_uri(
        original_dataset_uri, bounding_box, out_pixel_size, output_uri,
        resample_method):
    """Resize and resample the given dataset.

    Args:
        original_dataset_uri (string): a GDAL dataset
        bounding_box (list): [upper_left_x, upper_left_y, lower_right_x,
            lower_right_y]
        out_pixel_size: the pixel size in projected linear units
        output_uri (string): the location of the new resampled GDAL dataset
        resample_method (string): the resampling technique, one of
            "nearest|bilinear|cubic|cubic_spline|lanczos"

    Returns:
        None
    """
    # Map the user-facing method names onto the GDAL resampling enums.
    resample_dict = {
        "nearest": gdal.GRA_NearestNeighbour,
        "bilinear": gdal.GRA_Bilinear,
        "cubic": gdal.GRA_Cubic,
        "cubic_spline": gdal.GRA_CubicSpline,
        "lanczos": gdal.GRA_Lanczos
        }
    original_dataset = gdal.Open(original_dataset_uri)
    original_band = original_dataset.GetRasterBand(1)
    original_nodata = original_band.GetNoDataValue()
    # Fall back to a conventional sentinel when the source defines no nodata.
    if original_nodata is None:
        original_nodata = -9999
    original_sr = osr.SpatialReference()
    original_sr.ImportFromWkt(original_dataset.GetProjection())
    # North-up geotransform anchored at the bounding box's upper-left corner.
    output_geo_transform = [
        bounding_box[0], out_pixel_size, 0.0, bounding_box[1], 0.0,
        -out_pixel_size]
    new_x_size = abs(
        int(numpy.round((bounding_box[2] - bounding_box[0]) / out_pixel_size)))
    new_y_size = abs(
        int(numpy.round((bounding_box[3] - bounding_box[1]) / out_pixel_size)))
    # Guarantee at least a 1x1 output even for degenerate bounding boxes.
    if new_x_size == 0:
        new_x_size = 1
    if new_y_size == 0:
        new_y_size = 1
    # create the new x and y size
    block_size = original_band.GetBlockSize()
    # If the original band is tiled, then its x blocksize will be different
    # than the number of columns
    if original_band.XSize > 256 and original_band.YSize > 256:
        # it makes sense for many functions to have 256x256 blocks
        block_size[0] = 256
        block_size[1] = 256
        gtiff_creation_options = [
            'TILED=YES', 'BIGTIFF=IF_SAFER', 'BLOCKXSIZE=%d' % block_size[0],
            'BLOCKYSIZE=%d' % block_size[1]]
        metadata = original_band.GetMetadata('IMAGE_STRUCTURE')
        if 'PIXELTYPE' in metadata:
            gtiff_creation_options.append('PIXELTYPE=' + metadata['PIXELTYPE'])
    else:
        # it is so small or strangely aligned, use the default creation options
        gtiff_creation_options = []
    create_directories([os.path.dirname(output_uri)])
    gdal_driver = gdal.GetDriverByName('GTiff')
    output_dataset = gdal_driver.Create(
        output_uri, new_x_size, new_y_size, 1, original_band.DataType,
        options=gtiff_creation_options)
    output_band = output_dataset.GetRasterBand(1)
    output_band.SetNoDataValue(original_nodata)
    # Set the geotransform
    output_dataset.SetGeoTransform(output_geo_transform)
    output_dataset.SetProjection(original_sr.ExportToWkt())
    # need to make this a closure so we get the current time and we can affect
    # state
    def reproject_callback(df_complete, psz_message, p_progress_arg):
        """The argument names come from the GDAL API for callbacks.

        State is kept as attributes on the function object itself; the
        AttributeError branch initializes them on the first invocation.
        NOTE(review): `total_time += current_time` accumulates absolute
        timestamps, not elapsed time — looks suspect; confirm intent.
        """
        try:
            current_time = time.time()
            if ((current_time - reproject_callback.last_time) > 5.0 or
                    (df_complete == 1.0 and
                     reproject_callback.total_time >= 5.0)):
                reproject_callback.last_time = current_time
                reproject_callback.total_time += current_time
        except AttributeError:
            reproject_callback.last_time = time.time()
            reproject_callback.total_time = 0.0
    # Perform the projection/resampling
    gdal.ReprojectImage(
        original_dataset, output_dataset, original_sr.ExportToWkt(),
        original_sr.ExportToWkt(), resample_dict[resample_method], 0, 0,
        reproject_callback, [output_uri])
    # Make sure the dataset is closed and cleaned up
    original_band = None
    gdal.Dataset.__swig_destroy__(original_dataset)
    original_dataset = None
    output_dataset.FlushCache()
    gdal.Dataset.__swig_destroy__(output_dataset)
    output_dataset = None
    calculate_raster_stats_uri(output_uri)
def create_directories(directory_list):
    """Make directories provided in list of path strings.

    This function will create any of the directories in the directory list
    if possible and raise exceptions if something exception other than
    the directory previously existing occurs.

    Args:
        directory_list (list): a list of string uri paths

    Returns:
        None
    """
    for directory_path in directory_list:
        try:
            os.makedirs(directory_path)
        except OSError as os_error:
            # EEXIST (directory already there) is fine; ENOENT is also
            # tolerated to preserve historical behavior.  Any other error
            # is a real failure and propagates.
            if os_error.errno not in (errno.EEXIST, errno.ENOENT):
                raise
def get_datasource_bounding_box(datasource_uri):
    """Get datasource bounding box where coordinates are in projected units.

    Args:
        datasource_uri (string): a uri to an OGR datasource

    Returns:
        bounding_box (list):
            [upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
            projected coordinates
    """
    datasource = ogr.Open(datasource_uri)
    layer = datasource.GetLayer(0)
    extent = layer.GetExtent()
    # GetExtent() returns (minx, maxx, miny, maxy); reindex into the
    # upper-left / lower-right ordering documented above.
    bounding_box = [extent[0],
                    extent[3],
                    extent[1],
                    extent[2]]
    # bug fix: previously `return bounding_boxz`, a NameError at runtime
    return bounding_box
def iterblocks(
        raster_path, band_index_list=None, largest_block=_LARGEST_ITERBLOCK,
        astype=None, offset_only=False):
    """Iterate across all the memory blocks in the input raster.

    Result is a generator of block location information and numpy arrays.
    This is especially useful when a single value needs to be derived from
    the pixel values in a raster, such as the sum total of all pixel values,
    or a sequence of unique raster values. In such cases, `raster_local_op`
    is overkill, since it writes out a raster.

    As a generator, this can be combined multiple times with itertools.izip()
    to iterate 'simultaneously' over multiple rasters, though the user should
    be careful to do so only with prealigned rasters.

    Parameters:
        raster_path (string): Path to raster file to iterate over.
        band_index_list (list of ints or None): A list of the bands for which
            the matrices should be returned. The band number to operate on.
            Defaults to None, which will return all bands. Bands may be
            specified in any order, and band indexes may be specified
            multiple times. The blocks returned on each iteration will be in
            the order specified in this list.
        largest_block (int): Attempts to iterate over raster blocks with
            this many elements. Useful in cases where the blocksize is
            relatively small, memory is available, and the function call
            overhead dominates the iteration. Defaults to 2**20. A value of
            anything less than the original blocksize of the raster will
            result in blocksizes equal to the original size.
        astype (list of numpy types): If none, output blocks are in the
            native type of the raster bands. Otherwise this parameter is a
            list of len(band_index_list) length that contains the desired
            output types that iterblock generates for each band.
        offset_only (boolean): defaults to False, if True `iterblocks` only
            returns offset dictionary and doesn't read any binary data from
            the raster. This can be useful when iterating over writing to
            an output.

    Returns:
        If `offset_only` is false, on each iteration, a tuple containing a
        dict of block data and `n` 2-dimensional numpy arrays are returned,
        where `n` is the number of bands requested via `band_list`. The dict
        of block data has these attributes:
            data['xoff'] - The X offset of the upper-left-hand corner of the
                block.
            data['yoff'] - The Y offset of the upper-left-hand corner of the
                block.
            data['win_xsize'] - The width of the block.
            data['win_ysize'] - The height of the block.
        If `offset_only` is True, the function returns only the block offset
        data and does not attempt to read binary data from the raster.
    """
    raster = gdal.OpenEx(raster_path)
    if band_index_list is None:
        band_index_list = range(1, raster.RasterCount + 1)
    band_index_list = [
        raster.GetRasterBand(index) for index in band_index_list]
    block = band_index_list[0].GetBlockSize()
    cols_per_block = block[0]
    rows_per_block = block[1]
    n_cols = raster.RasterXSize
    n_rows = raster.RasterYSize
    block_area = cols_per_block * rows_per_block
    # try to make block wider
    # bug fix: use floor division (//) so the block dimensions stay ints
    # under Python 3 -- true division produced float sizes that break
    # numpy.zeros shapes and ReadAsArray offsets below.
    if largest_block // block_area > 0:
        width_factor = largest_block // block_area
        cols_per_block *= width_factor
        if cols_per_block > n_cols:
            cols_per_block = n_cols
        block_area = cols_per_block * rows_per_block
    # try to make block taller
    if largest_block // block_area > 0:
        height_factor = largest_block // block_area
        rows_per_block *= height_factor
        if rows_per_block > n_rows:
            rows_per_block = n_rows
    n_col_blocks = int(math.ceil(n_cols / float(cols_per_block)))
    n_row_blocks = int(math.ceil(n_rows / float(rows_per_block)))
    # Initialize to None so a block array is created on the first iteration
    last_row_block_width = None
    last_col_block_width = None
    if astype is not None:
        block_type_list = [astype] * len(band_index_list)
    else:
        block_type_list = [
            _gdal_to_numpy_type(ds_band) for ds_band in band_index_list]
    for row_block_index in range(n_row_blocks):
        row_offset = row_block_index * rows_per_block
        row_block_width = n_rows - row_offset
        if row_block_width > rows_per_block:
            row_block_width = rows_per_block
        for col_block_index in range(n_col_blocks):
            col_offset = col_block_index * cols_per_block
            col_block_width = n_cols - col_offset
            if col_block_width > cols_per_block:
                col_block_width = cols_per_block
            # resize the raster block cache if necessary
            if (last_row_block_width != row_block_width or
                    last_col_block_width != col_block_width):
                raster_blocks = [
                    numpy.zeros(
                        (row_block_width, col_block_width),
                        dtype=block_type) for block_type in
                    block_type_list]
            offset_dict = {
                'xoff': col_offset,
                'yoff': row_offset,
                'win_xsize': col_block_width,
                'win_ysize': row_block_width,
            }
            result = offset_dict
            if not offset_only:
                for ds_band, block in zip(band_index_list, raster_blocks):
                    ds_band.ReadAsArray(buf_obj=block, **offset_dict)
                result = (result,) + tuple(raster_blocks)
            yield result
def get_vector_info(vector_path, layer_index=0):
    """Get information about an OGR vector (datasource).

    Parameters:
        vector_path (str): a path to a OGR vector.
        layer_index (int): index of underlying layer to analyze. Defaults
            to 0.

    Returns:
        raster_properties (dictionary): a dictionary with the following
            properties stored under relevant keys.

            'projection' (string): projection of the vector in Well Known
                Text.
            'bounding_box' (list): list of floats representing the bounding
                box in projected coordinates as [minx, miny, maxx, maxy].
    """
    vector = gdal.OpenEx(vector_path)
    layer = vector.GetLayer(iLayer=layer_index)
    # Every layer shares the datasource's spatial reference, so the
    # requested layer's is representative.
    projection_wkt = layer.GetSpatialRef().ExportToWkt()
    # GetExtent() yields (minx, maxx, miny, maxy); reorder to the
    # [minx, miny, maxx, maxy] convention used throughout this module.
    minx, maxx, miny, maxy = layer.GetExtent()
    layer = None
    vector = None
    return {
        'projection': projection_wkt,
        'bounding_box': [minx, miny, maxx, maxy],
    }
def _merge_bounding_boxes(bb1, bb2, mode):
"""Merge two bounding boxes through union or intersection.
Parameters:
bb1, bb2 (list): list of float representing bounding box in the
form bb=[minx,miny,maxx,maxy]
mode (string); one of 'union' or 'intersection'
Returns:
Reduced bounding box of bb1/bb2 depending on mode.
"""
def _less_than_or_equal(x_val, y_val):
return x_val if x_val <= y_val else y_val
def _greater_than(x_val, y_val):
return x_val if x_val > y_val else y_val
if mode == "union":
comparison_ops = [
_less_than_or_equal, _less_than_or_equal,
_greater_than, _greater_than]
if mode == "intersection":
comparison_ops = [
_greater_than, _greater_than,
_less_than_or_equal, _less_than_or_equal]
bb_out = [op(x, y) for op, x, y in zip(comparison_ops, bb1, bb2)]
return bb_out
def _invoke_timed_callback(
reference_time, callback_lambda, callback_period):
"""Invoke callback if a certain amount of time has passed.
This is a convenience function to standardize update callbacks from the
module.
Parameters:
reference_time (float): time to base `callback_period` length from.
callback_lambda (lambda): function to invoke if difference between
current time and `reference_time` has exceeded `callback_period`.
callback_period (float): time in seconds to pass until
`callback_lambda` is invoked.
Returns:
`reference_time` if `callback_lambda` not invoked, otherwise the time
when `callback_lambda` was invoked.
"""
current_time = time.time()
if current_time - reference_time > callback_period:
callback_lambda()
return current_time
return reference_time
def align_and_resize_raster_stack(
        base_raster_path_list, target_raster_path_list, resample_method_list,
        target_pixel_size, bounding_box_mode, base_vector_path_list=None,
        raster_align_index=None,
        gtiff_creation_options=_DEFAULT_GTIFF_CREATION_OPTIONS):
    """Generate rasters from a base such that they align geospatially.

    This function resizes base rasters that are in the same geospatial
    projection such that the result is an aligned stack of rasters that have
    the same cell size, dimensions, and bounding box. This is achieved by
    clipping or resizing the rasters to intersected, unioned, or equivocated
    bounding boxes of all the raster and vector input.

    Parameters:
        base_raster_path_list (list): a list of base raster paths that will
            be transformed and will be used to determine the target bounding
            box.
        target_raster_path_list (list): a list of raster paths that will be
            created to one-to-one map with `base_raster_path_list` as
            aligned versions of those original rasters.
        resample_method_list (list): a list of resampling methods which
            one to one map each path in `base_raster_path_list` during
            resizing. Each element must be one of
            "nearest|bilinear|cubic|cubic_spline|lanczos|mode".
        target_pixel_size (tuple): the target raster's x and y pixel size
            example: [30, -30].
        bounding_box_mode (string): one of "union", "intersection", or
            a list of floats of the form [minx, miny, maxx, maxy]. Depending
            on the value, output extents are defined as the union,
            intersection, or the explicit bounding box.
        base_vector_path_list (list): a list of base vector paths whose
            bounding boxes will be used to determine the final bounding box
            of the raster stack if mode is 'union' or 'intersection'. If
            mode is 'bb=[...]' then these vectors are not used in any
            calculation.
        raster_align_index (int): indicates the index of a
            raster in `base_raster_path_list` that the target rasters'
            bounding boxes pixels should align with. This feature allows
            rasters whose raster dimensions are the same, but bounding boxes
            slightly shifted less than a pixel size to align with a desired
            grid layout. If `None` then the bounding box of the target
            rasters is calculated as the precise intersection, union, or
            bounding box.
        gtiff_creation_options (list): list of strings that will be passed
            as GDAL "dataset" creation options to the GTIFF driver, or
            ignored if None.

    Returns:
        None
    """
    last_time = time.time()
    # make sure that the input lists are of the same length
    list_lengths = [
        len(base_raster_path_list), len(target_raster_path_list),
        len(resample_method_list)]
    if len(set(list_lengths)) != 1:
        raise ValueError(
            "base_raster_path_list, target_raster_path_list, and "
            "resample_method_list must be the same length "
            " current lengths are %s" % (str(list_lengths)))
    # we can accept 'union', 'intersection', or a 4 element list/tuple
    if bounding_box_mode not in ["union", "intersection"] and (
            not isinstance(bounding_box_mode, (list, tuple)) or
            len(bounding_box_mode) != 4):
        raise ValueError("Unknown bounding_box_mode %s" % (
            str(bounding_box_mode)))
    if ((raster_align_index is not None) and
            ((raster_align_index < 0) or
             (raster_align_index >= len(base_raster_path_list)))):
        raise ValueError(
            "Alignment index is out of bounds of the datasets index: %s"
            " n_elements %s" % (
                raster_align_index, len(base_raster_path_list)))
    raster_info_list = [
        get_raster_info(path) for path in base_raster_path_list]
    if base_vector_path_list is not None:
        vector_info_list = [
            get_vector_info(path) for path in base_vector_path_list]
    else:
        vector_info_list = []
    # get the literal or intersecting/unioned bounding box
    if isinstance(bounding_box_mode, (list, tuple)):
        target_bounding_box = bounding_box_mode
    else:
        # either intersection or union; fold every input's bounding box
        # into a single box using the requested merge mode
        target_bounding_box = functools.reduce(
            functools.partial(_merge_bounding_boxes, mode=bounding_box_mode),
            [info['bounding_box'] for info in
             (raster_info_list + vector_info_list)])
    if bounding_box_mode == "intersection" and (
            target_bounding_box[0] > target_bounding_box[2] or
            target_bounding_box[1] > target_bounding_box[3]):
        raise ValueError("The rasters' and vectors' intersection is empty "
                         "(not all rasters and vectors touch each other).")
    # bug fix: was `if raster_align_index >= 0:`, which raises TypeError in
    # Python 3 when the documented default of None is used.
    if raster_align_index is not None:
        # bounding box needs alignment
        align_bounding_box = (
            raster_info_list[raster_align_index]['bounding_box'])
        align_pixel_size = (
            raster_info_list[raster_align_index]['pixel_size'])
        # adjust bounding box so lower left corner aligns with a pixel in
        # raster[raster_align_index]
        for index in [0, 1]:
            n_pixels = int(
                (target_bounding_box[index] - align_bounding_box[index]) /
                float(align_pixel_size[index]))
            target_bounding_box[index] = (
                n_pixels * align_pixel_size[index] +
                align_bounding_box[index])
    for index, (base_path, target_path, resample_method) in enumerate(zip(
            base_raster_path_list, target_raster_path_list,
            resample_method_list)):
        last_time = _invoke_timed_callback(
            last_time, lambda: LOGGER.info(
                "align_dataset_list aligning dataset %d of %d",
                index, len(base_raster_path_list)), _LOGGING_PERIOD)
        warp_raster(
            base_path, target_pixel_size,
            target_path, resample_method,
            target_bb=target_bounding_box,
            gtiff_creation_options=gtiff_creation_options)
def warp_raster(
        base_raster_path, target_pixel_size, target_raster_path,
        resample_method, target_bb=None, target_sr_wkt=None,
        gtiff_creation_options=_DEFAULT_GTIFF_CREATION_OPTIONS):
    """Resize/resample raster to desired pixel size, bbox and projection.

    Parameters:
        base_raster_path (string): path to base raster.
        target_pixel_size (list): a two element list or tuple indicating the
            x and y pixel size in projected units.
        target_raster_path (string): the location of the resized and
            resampled raster.
        resample_method (string): the resampling technique, one of
            "nearest|bilinear|cubic|cubic_spline|lanczos|mode"
        target_bb (list): if None, target bounding box is the same as the
            source bounding box. Otherwise it's a list of float describing
            target bounding box in target coordinate system as
            [minx, miny, maxx, maxy].
        target_sr_wkt (string): if not None, desired target projection in
            Well Known Text format.
        gtiff_creation_options (list or tuple): list of strings that will be
            passed as GDAL "dataset" creation options to the GTIFF driver.

    Returns:
        None
    """
    base_raster = gdal.OpenEx(base_raster_path)
    base_sr = osr.SpatialReference()
    base_sr.ImportFromWkt(base_raster.GetProjection())
    if target_bb is None:
        target_bb = get_raster_info(base_raster_path)['bounding_box']
        # transform the target_bb if target_sr_wkt is not None
        if target_sr_wkt is not None:
            target_bb = transform_bounding_box(
                get_raster_info(base_raster_path)['bounding_box'],
                get_raster_info(base_raster_path)['projection'],
                target_sr_wkt)
    target_geotransform = [
        target_bb[0], target_pixel_size[0], 0.0, target_bb[1], 0.0,
        target_pixel_size[1]]
    # this handles a case of a negative pixel size in which case the raster
    # row will increase downward
    if target_pixel_size[0] < 0:
        target_geotransform[0] = target_bb[2]
    if target_pixel_size[1] < 0:
        target_geotransform[3] = target_bb[3]
    target_x_size = abs((target_bb[2] - target_bb[0]) / target_pixel_size[0])
    target_y_size = abs((target_bb[3] - target_bb[1]) / target_pixel_size[1])
    # round fractional pixel counts up so the bounding box is fully covered
    if target_x_size - int(target_x_size) > 0:
        target_x_size = int(target_x_size) + 1
    else:
        target_x_size = int(target_x_size)
    if target_y_size - int(target_y_size) > 0:
        target_y_size = int(target_y_size) + 1
    else:
        target_y_size = int(target_y_size)
    if target_x_size == 0:
        # LOGGER.warn is deprecated; use warning
        LOGGER.warning(
            "bounding_box is so small that x dimension rounds to 0; "
            "clamping to 1.")
        target_x_size = 1
    if target_y_size == 0:
        LOGGER.warning(
            "bounding_box is so small that y dimension rounds to 0; "
            "clamping to 1.")
        target_y_size = 1
    local_gtiff_creation_options = list(gtiff_creation_options)
    # PIXELTYPE is sometimes used to define signed vs. unsigned bytes and
    # the only place that is stored is in the IMAGE_STRUCTURE metadata
    # copy it over if it exists; get this info from the first band since
    # all bands have the same datatype
    base_band = base_raster.GetRasterBand(1)
    metadata = base_band.GetMetadata('IMAGE_STRUCTURE')
    if 'PIXELTYPE' in metadata:
        local_gtiff_creation_options.append(
            'PIXELTYPE=' + metadata['PIXELTYPE'])
    # make directory if it doesn't exist; tolerate a pre-existing directory
    # (EEXIST) or a target path with no directory component (ENOENT), but
    # re-raise real errors such as permission failures (consistent with
    # create_directories in this module)
    try:
        os.makedirs(os.path.dirname(target_raster_path))
    except OSError as os_error:
        if os_error.errno not in (errno.EEXIST, errno.ENOENT):
            raise
    gdal_driver = gdal.GetDriverByName('GTiff')
    target_raster = gdal_driver.Create(
        target_raster_path, target_x_size, target_y_size,
        base_raster.RasterCount, base_band.DataType,
        options=local_gtiff_creation_options)
    base_band = None
    for index in range(target_raster.RasterCount):
        base_nodata = base_raster.GetRasterBand(1+index).GetNoDataValue()
        if base_nodata is not None:
            target_band = target_raster.GetRasterBand(1+index)
            target_band.SetNoDataValue(base_nodata)
            target_band = None
    # Set the geotransform
    target_raster.SetGeoTransform(target_geotransform)
    if target_sr_wkt is None:
        target_sr_wkt = base_sr.ExportToWkt()
    target_raster.SetProjection(target_sr_wkt)
    # need to make this a closure so we get the current time and we can
    # affect state
    reproject_callback = _make_logger_callback(
        "ReprojectImage %.1f%% complete %s, psz_message '%s'")
    # Perform the projection/resampling
    gdal.ReprojectImage(
        base_raster, target_raster, base_sr.ExportToWkt(),
        target_sr_wkt, _RESAMPLE_DICT[resample_method], 0, 0,
        reproject_callback, [target_raster_path])
    target_raster = None
    base_raster = None
    calculate_raster_stats(target_raster_path)
def transform_bounding_box(
        bounding_box, base_ref_wkt, target_ref_wkt, edge_samples=11):
    """Transform input bounding box to output projection.

    This transform accounts for the fact that the reprojected square
    bounding box might be warped in the new coordinate system. To account
    for this, the function samples points along the original bounding box
    edges and attempts to make the largest bounding box around any
    transformed point on the edge whether corners or warped edges.

    Parameters:
        bounding_box (list): a list of 4 coordinates in `base_epsg`
            coordinate system describing the bound in the order
            [xmin, ymin, xmax, ymax]
        base_ref_wkt (string): the spatial reference of the input coordinate
            system in Well Known Text.
        target_ref_wkt (string): the spatial reference of the desired output
            coordinate system in Well Known Text.
        edge_samples (int): the number of interpolated points along each
            bounding box edge to sample along. A value of 2 will sample just
            the corners while a value of 3 will also sample the corners and
            the midpoint.

    Returns:
        A list of the form [xmin, ymin, xmax, ymax] that describes the
        largest fitting bounding box around the original warped bounding box
        in `new_epsg` coordinate system.
    """
    base_ref = osr.SpatialReference()
    base_ref.ImportFromWkt(base_ref_wkt)
    target_ref = osr.SpatialReference()
    target_ref.ImportFromWkt(target_ref_wkt)
    transformer = osr.CoordinateTransformation(base_ref, target_ref)

    def _transform_point(point):
        """Transform an (x,y) point tuple from base_ref to target_ref."""
        trans_x, trans_y, _ = (transformer.TransformPoint(*point))
        return (trans_x, trans_y)

    # Corners numbered from 0 starting upper left, then clockwise-opposite:
    #   0--3
    #   |  |
    #   1--2
    upper_left = numpy.array((bounding_box[0], bounding_box[3]))
    lower_left = numpy.array((bounding_box[0], bounding_box[1]))
    lower_right = numpy.array((bounding_box[2], bounding_box[1]))
    upper_right = numpy.array((bounding_box[2], bounding_box[3]))

    # Each edge is interpolated at `edge_samples` points, every sample is
    # transformed, and the extreme coordinate relevant to that edge is
    # kept (e.g. the left edge contributes the minimum x of its samples).
    edge_specs = [
        (upper_left, lower_left, lambda pts: min(p[0] for p in pts)),
        (lower_left, lower_right, lambda pts: min(p[1] for p in pts)),
        (lower_right, upper_right, lambda pts: max(p[0] for p in pts)),
        (upper_right, upper_left, lambda pts: max(p[1] for p in pts)),
    ]
    transformed_bounding_box = []
    for edge_start, edge_end, reduce_edge in edge_specs:
        sample_points = [
            _transform_point(edge_start * weight + edge_end * (1 - weight))
            for weight in numpy.linspace(0, 1, edge_samples)]
        transformed_bounding_box.append(reduce_edge(sample_points))
    return transformed_bounding_box
def _make_logger_callback(message):
    """Build a timed logger callback that prints `message` replaced.

    Parameters:
        message (string): a string that expects 3 placement %% variables,
            first for % complete from `df_complete`, second `psz_message`
            and last is `p_progress_arg[0]`.

    Returns:
        Function with signature:
            logger_callback(df_complete, psz_message, p_progress_arg)
    """
    def logger_callback(df_complete, psz_message, p_progress_arg):
        """The argument names come from the GDAL API for callbacks."""
        try:
            current_time = time.time()
            # Log at most every 5 seconds, plus a final message at 100%
            # (the latter only if the whole operation ran long enough).
            elapsed = current_time - logger_callback.last_time
            final_update = (
                df_complete == 1.0 and logger_callback.total_time >= 5.0)
            if elapsed > 5.0 or final_update:
                LOGGER.info(
                    message, df_complete * 100, p_progress_arg[0],
                    psz_message)
                logger_callback.last_time = current_time
                logger_callback.total_time += current_time
        except AttributeError:
            # First invocation: the function attributes don't exist yet.
            logger_callback.last_time = time.time()
            logger_callback.total_time = 0.0
    return logger_callback
def _is_raster_path_band_formatted(raster_path_band):
"""Returns true if raster path band is a (str, int) tuple/list."""
if not isinstance(raster_path_band, (list, tuple)):
return False
elif len(raster_path_band) != 2:
return False
elif not isinstance(raster_path_band[0], str):
return False
elif not isinstance(raster_path_band[1], int):
return False
else:
return True
def zonal_statistics(
        base_raster_path_band, aggregate_vector_path,
        aggregate_field_name, aggregate_layer_name=None,
        ignore_nodata=True, all_touched=False, polygons_might_overlap=True,
        working_dir=None):
    """Collect stats on pixel values which lie within polygons.

    This function summarizes raster statistics including min, max,
    mean, stddev, and pixel count over the regions on the raster that are
    overlaped by the polygons in the vector layer. This function can
    handle cases where polygons overlap, which is notable since zonal stats
    functions provided by ArcGIS or QGIS usually incorrectly aggregate
    these areas. Overlap avoidance is achieved by calculating a minimal set
    of disjoint non-overlapping polygons from `aggregate_vector_path` and
    rasterizing each set separately during the raster aggregation phase.
    That set of rasters are then used to calculate the zonal stats of all
    polygons without aggregating vector overlap.

    Parameters:
        base_raster_path_band (tuple): a str/int tuple indicating the path
            to the base raster and the band index of that raster to analyze.
        aggregate_vector_path (string): a path to an ogr compatable polygon
            vector whose geometric features indicate the areas over
            `base_raster_path_band` to calculate statistics over.
        aggregate_field_name (string): field name in `aggregate_vector_path`
            that represents an identifying value for a given polygon. Result
            of this function will be indexed by the values found in this
            field.
        aggregate_layer_name (string): name of shapefile layer that will be
            used to aggregate results over. If set to None, the first layer
            in the DataSource will be used as retrieved by `.GetLayer()`.
            Note: it is normal and expected to set this field at None if the
            aggregating shapefile is a single layer as many shapefiles,
            including the common 'ESRI Shapefile', are.
        ignore_nodata: if true, then nodata pixels are not accounted for
            when calculating min, max, count, or mean. However, the value
            of `nodata_count` will always be the number of nodata pixels
            aggregated under the polygon.
        all_touched (boolean): if true will account for any pixel whose
            geometry passes through the pixel, not just the center point.
        polygons_might_overlap (boolean): if True the function calculates
            aggregation coverage close to optimally by rasterizing sets of
            polygons that don't overlap. However, this step can be
            computationally expensive for cases where there are many
            polygons. Setting this flag to False directs the function
            rasterize in one step.
        working_dir (string): If not None, indicates where temporary files
            should be created during this run.

    Returns:
        nested dictionary indexed by aggregating feature id, and then by one
        of 'min' 'max' 'sum' 'mean' 'count' and 'nodata_count'. Example:
        {0: {'min': 0, 'max': 1, 'mean': 0.5, 'count': 2, 'nodata_count': 1}}
    """
    import uuid
    import shutil
    if not _is_raster_path_band_formatted(base_raster_path_band):
        # bug fix: message was built with `+` against a tuple, which raised
        # TypeError instead of the intended ValueError text
        raise ValueError(
            "`base_raster_path_band` not formatted as expected. Expects "
            "(path, band_index), received %s" % (base_raster_path_band,))
    aggregate_vector = gdal.OpenEx(aggregate_vector_path)
    if aggregate_layer_name is not None:
        aggregate_layer = aggregate_vector.GetLayerByName(
            aggregate_layer_name)
    else:
        aggregate_layer = aggregate_vector.GetLayer()
    aggregate_layer_defn = aggregate_layer.GetLayerDefn()
    aggregate_field_index = aggregate_layer_defn.GetFieldIndex(
        aggregate_field_name)
    if aggregate_field_index == -1:  # -1 returned when field does not exist.
        # Raise exception if user provided a field that's not in vector
        raise ValueError(
            'Vector %s must have a field named %s' %
            (aggregate_vector_path, aggregate_field_name))
    # create a new aggregate ID field to map base vector aggregate fields to
    # local ones that are guaranteed to be integers.
    local_aggregate_field_name = str(uuid.uuid4())[-8:-1]
    local_aggregate_field_def = ogr.FieldDefn(
        local_aggregate_field_name, ogr.OFTInteger)
    # Adding the rasterize by attribute option
    rasterize_layer_args = {
        'options': [
            'ALL_TOUCHED=%s' % str(all_touched).upper(),
            'ATTRIBUTE=%s' % local_aggregate_field_name]
    }
    # clip base raster to aggregating vector intersection
    raster_info = get_raster_info(base_raster_path_band[0])
    # -1 here because bands are 1 indexed.
    # bug fix: raster_nodata was hard-coded to None (with a leftover debug
    # print), so nodata pixels were never detected and nodata_count was
    # always 0. NOTE(review): assumes get_raster_info reports per-band
    # nodata values under the 'nodata' key -- confirm against its docstring.
    raster_nodata = raster_info['nodata'][base_raster_path_band[1] - 1]
    with tempfile.NamedTemporaryFile(
            prefix='clipped_raster', delete=False,
            dir=working_dir) as clipped_raster_file:
        clipped_raster_path = clipped_raster_file.name
    align_and_resize_raster_stack(
        [base_raster_path_band[0]], [clipped_raster_path], ['nearest'],
        raster_info['pixel_size'], 'intersection',
        base_vector_path_list=[aggregate_vector_path], raster_align_index=0)
    clipped_raster = gdal.OpenEx(clipped_raster_path)
    # make a shapefile that non-overlapping layers can be added to
    driver = ogr.GetDriverByName('ESRI Shapefile')
    disjoint_vector_dir = tempfile.mkdtemp(dir=working_dir)
    disjoint_vector = driver.CreateDataSource(
        os.path.join(disjoint_vector_dir, 'disjoint_vector.shp'))
    spat_ref = aggregate_layer.GetSpatialRef()
    # Initialize these dictionaries to have the shapefile fields in the
    # original datasource even if we don't pick up a value later
    base_to_local_aggregate_value = {}
    for feature in aggregate_layer:
        aggregate_field_value = feature.GetField(aggregate_field_name)
        # this builds up a map of aggregate field values to unique ids
        if aggregate_field_value not in base_to_local_aggregate_value:
            base_to_local_aggregate_value[aggregate_field_value] = len(
                base_to_local_aggregate_value)
    aggregate_layer.ResetReading()
    # Loop over each polygon and aggregate
    if polygons_might_overlap:
        minimal_polygon_sets = calculate_disjoint_polygon_set(
            aggregate_vector_path)
    else:
        minimal_polygon_sets = [
            set([feat.GetFID() for feat in aggregate_layer])]
    clipped_band = clipped_raster.GetRasterBand(base_raster_path_band[1])
    with tempfile.NamedTemporaryFile(
            prefix='aggregate_id_raster',
            delete=False, dir=working_dir) as aggregate_id_raster_file:
        aggregate_id_raster_path = aggregate_id_raster_file.name
    # pick a nodata id one larger than any assigned local aggregate id
    aggregate_id_nodata = len(base_to_local_aggregate_value)
    new_raster_from_base(
        clipped_raster_path, aggregate_id_raster_path, gdal.GDT_Int32,
        [aggregate_id_nodata])
    aggregate_id_raster = gdal.OpenEx(
        aggregate_id_raster_path, gdal.GA_Update)
    aggregate_stats = {}
    for polygon_set in minimal_polygon_sets:
        disjoint_layer = disjoint_vector.CreateLayer(
            'disjoint_vector', spat_ref, ogr.wkbPolygon)
        disjoint_layer.CreateField(local_aggregate_field_def)
        # add polygons to subset_layer
        for index, poly_fid in enumerate(polygon_set):
            poly_feat = aggregate_layer.GetFeature(poly_fid)
            disjoint_layer.CreateFeature(poly_feat)
            # we seem to need to reload the feature and set the index
            # because just copying over the feature left indexes as all 0s.
            # Not sure why.
            new_feat = disjoint_layer.GetFeature(index)
            new_feat.SetField(
                local_aggregate_field_name, base_to_local_aggregate_value[
                    poly_feat.GetField(aggregate_field_name)])
            disjoint_layer.SetFeature(new_feat)
        disjoint_layer.SyncToDisk()
        # nodata out the mask
        aggregate_id_band = aggregate_id_raster.GetRasterBand(1)
        aggregate_id_band.Fill(aggregate_id_nodata)
        aggregate_id_band = None
        gdal.RasterizeLayer(
            aggregate_id_raster, [1], disjoint_layer,
            **rasterize_layer_args)
        aggregate_id_raster.FlushCache()
        # Delete the features we just added to the subset_layer
        disjoint_layer = None
        disjoint_vector.DeleteLayer(0)
        # create a key array
        # and parallel min, max, count, and nodata count arrays
        for aggregate_id_offsets, aggregate_id_block in iterblocks(
                aggregate_id_raster_path):
            clipped_block = clipped_band.ReadAsArray(**aggregate_id_offsets)
            # guard against a None nodata type
            valid_mask = numpy.ones(aggregate_id_block.shape, dtype=bool)
            if aggregate_id_nodata is not None:
                valid_mask[:] = aggregate_id_block != aggregate_id_nodata
            valid_aggregate_id = aggregate_id_block[valid_mask]
            valid_clipped = clipped_block[valid_mask]
            for aggregate_id in numpy.unique(valid_aggregate_id):
                aggregate_mask = valid_aggregate_id == aggregate_id
                masked_clipped_block = valid_clipped[aggregate_mask]
                clipped_nodata_mask = (
                    masked_clipped_block == raster_nodata)
                if aggregate_id not in aggregate_stats:
                    aggregate_stats[aggregate_id] = {
                        'min': None,
                        'max': None,
                        'count': 0,
                        'nodata_count': 0,
                        'sum': 0.0
                    }
                aggregate_stats[aggregate_id]['nodata_count'] += (
                    numpy.count_nonzero(clipped_nodata_mask))
                if ignore_nodata:
                    masked_clipped_block = (
                        masked_clipped_block[~clipped_nodata_mask])
                if masked_clipped_block.size == 0:
                    continue
                if aggregate_stats[aggregate_id]['min'] is None:
                    aggregate_stats[aggregate_id]['min'] = (
                        masked_clipped_block[0])
                    aggregate_stats[aggregate_id]['max'] = (
                        masked_clipped_block[0])
                aggregate_stats[aggregate_id]['min'] = min(
                    numpy.min(masked_clipped_block),
                    aggregate_stats[aggregate_id]['min'])
                aggregate_stats[aggregate_id]['max'] = max(
                    numpy.max(masked_clipped_block),
                    aggregate_stats[aggregate_id]['max'])
                aggregate_stats[aggregate_id]['count'] += (
                    masked_clipped_block.size)
                aggregate_stats[aggregate_id]['sum'] += numpy.sum(
                    masked_clipped_block)
    # clean up temporary files
    clipped_band = None
    clipped_raster = None
    aggregate_id_raster = None
    disjoint_layer = None
    disjoint_vector = None
    for filename in [aggregate_id_raster_path, clipped_raster_path]:
        os.remove(filename)
    shutil.rmtree(disjoint_vector_dir)
    # map the local ids back to the original base value
    # bug fix: dict.iteritems() is Python 2 only; use items()
    local_to_base_aggregate_value = {
        value: key for key, value in
        base_to_local_aggregate_value.items()}
    return {
        local_to_base_aggregate_value[key]: value
        for key, value in aggregate_stats.items()}
def calculate_disjoint_polygon_set(vector_path, layer_index=0):
    """Create a list of sets of polygons that don't overlap.

    Determining the minimal number of those sets is an np-complete problem
    so this is an approximation that builds up sets of maximal subsets.

    Parameters:
        vector_path (string): a path to an OGR vector.
        layer_index (int): index of underlying layer in `vector_path` to
            calculate disjoint set. Defaults to 0.

    Returns:
        subset_list (list): list of sets of FIDs from vector_path
    """
    import heapq
    vector = gdal.OpenEx(vector_path)
    vector_layer = vector.GetLayer()
    poly_intersect_lookup = {}
    for poly_feat in vector_layer:
        poly_wkt = poly_feat.GetGeometryRef().ExportToWkt()
        shapely_polygon = shapely.wkt.loads(poly_wkt)
        poly_wkt = None
        poly_fid = poly_feat.GetFID()
        poly_intersect_lookup[poly_fid] = {
            'poly': shapely_polygon,
            'intersects': set(),
        }
    vector_layer = None
    vector = None
    # record, for each polygon, the set of other polygons it intersects
    for poly_fid in poly_intersect_lookup:
        polygon = shapely.prepared.prep(
            poly_intersect_lookup[poly_fid]['poly'])
        for intersect_poly_fid in poly_intersect_lookup:
            if intersect_poly_fid == poly_fid or polygon.intersects(
                    poly_intersect_lookup[intersect_poly_fid]['poly']):
                poly_intersect_lookup[poly_fid]['intersects'].add(
                    intersect_poly_fid)
        polygon = None
    # Build maximal subsets
    subset_list = []
    while len(poly_intersect_lookup) > 0:
        # sort polygons by increasing number of intersections
        # bug fix: dict.iteritems() is Python 2 only; use items()
        heap = []
        for poly_fid, poly_dict in poly_intersect_lookup.items():
            heapq.heappush(
                heap, (len(poly_dict['intersects']), poly_fid, poly_dict))
        # build maximal subset
        maximal_set = set()
        while len(heap) > 0:
            _, poly_fid, poly_dict = heapq.heappop(heap)
            for maxset_fid in maximal_set:
                if maxset_fid in poly_intersect_lookup[poly_fid][
                        'intersects']:
                    # it intersects and can't be part of the maximal subset
                    break
            else:
                # made it through without an intersection, add poly_fid to
                # the maximal set
                maximal_set.add(poly_fid)
                # remove that polygon and update the intersections
                del poly_intersect_lookup[poly_fid]
        # remove all the polygons from intersections once they're computed
        # bug fix: dict.itervalues() is Python 2 only; use values()
        for maxset_fid in maximal_set:
            for poly_dict in poly_intersect_lookup.values():
                poly_dict['intersects'].discard(maxset_fid)
        subset_list.append(maximal_set)
    return subset_list
def calculate_raster_stats(raster_path):
    """Calculate and set min, max, stdev, and mean for all bands in raster.

    Statistics are computed in two streaming passes over each band (one for
    min/max/mean, one for the standard deviation) so a whole band never has
    to fit in memory at once.

    Parameters:
        raster_path (string): a path to a GDAL raster raster that will be
            modified by having its band statistics set

    Returns:
        None
    """
    # Open in update mode since SetStatistics writes back into the file.
    raster = gdal.OpenEx(raster_path, gdal.GA_Update)
    raster_properties = get_raster_info(raster_path)
    for band_index in range(raster.RasterCount):
        # First pass: accumulate min/max/sum/count over non-nodata pixels.
        target_min = None
        target_max = None
        target_n = 0
        target_sum = 0.0
        for _, target_block in iterblocks(
                raster_path, band_index_list=[band_index+1]):
            nodata_target = raster_properties['nodata'][band_index]
            # guard against an undefined nodata target
            valid_mask = numpy.ones(target_block.shape, dtype=bool)
            if nodata_target is not None:
                valid_mask[:] = target_block != nodata_target
            valid_block = target_block[valid_mask]
            if valid_block.size == 0:
                continue
            if target_min is None:
                # initialize first min/max
                target_min = target_max = valid_block[0]
            target_sum += numpy.sum(valid_block)
            target_min = min(numpy.min(valid_block), target_min)
            target_max = max(numpy.max(valid_block), target_max)
            target_n += valid_block.size
        if target_min is not None:
            # Second pass: accumulate squared deviations for the population
            # standard deviation.  `nodata_target` still holds this band's
            # nodata value from the first pass.
            target_mean = target_sum / float(target_n)
            stdev_sum = 0.0
            for _, target_block in iterblocks(
                    raster_path, band_index_list=[band_index+1]):
                # guard against an undefined nodata target
                valid_mask = numpy.ones(target_block.shape, dtype=bool)
                if nodata_target is not None:
                    valid_mask = target_block != nodata_target
                valid_block = target_block[valid_mask]
                stdev_sum += numpy.sum((valid_block - target_mean) ** 2)
            target_stddev = (stdev_sum / float(target_n)) ** 0.5
            target_band = raster.GetRasterBand(band_index+1)
            target_band.SetStatistics(
                float(target_min), float(target_max), float(target_mean),
                float(target_stddev))
            target_band = None
        else:
            # Band was entirely nodata; leave its statistics unset.
            LOGGER.warn(
                "Stats not calculated for %s band %d since no non-nodata "
                "pixels were found.", raster_path, band_index+1)
    raster = None
| 127 | 0 | 53 |
01531490b5a75ebd803634367f6ed969cdc69f75 | 2,233 | py | Python | backend/course_application/sources/services/course_service.py | heyImDrew/edupro | 98b8342dda45071da4871bbf73f2ef002fee938f | [
"Apache-2.0"
] | null | null | null | backend/course_application/sources/services/course_service.py | heyImDrew/edupro | 98b8342dda45071da4871bbf73f2ef002fee938f | [
"Apache-2.0"
] | null | null | null | backend/course_application/sources/services/course_service.py | heyImDrew/edupro | 98b8342dda45071da4871bbf73f2ef002fee938f | [
"Apache-2.0"
] | null | null | null | import random
from ..serializers.course_serializers import CourseSerializer
from ..serializers.partition_serializer import PartitionSerializer, PartitionTaskSerializer
from ..models.user_course import UserCourse
from ..models.course import Course
from ..models.partition import Partition
from ..models.partition_task import PartitionTask
course_service = CourseService()
| 37.847458 | 94 | 0.677116 | import random
from ..serializers.course_serializers import CourseSerializer
from ..serializers.partition_serializer import PartitionSerializer, PartitionTaskSerializer
from ..models.user_course import UserCourse
from ..models.course import Course
from ..models.partition import Partition
from ..models.partition_task import PartitionTask
class CourseService:
    """Business logic for a user's courses: listing, retrieval, like-toggling,
    counts, and random selection.  All methods return JSON-serializable data
    for the API layer."""

    def append_partitions(self, course_id, data):
        """Attach the course's ordered partitions (and each partition's task,
        when present) to `data` under the 'partitions' key; returns `data`."""
        partitions = Partition.objects.filter(course_id=course_id).order_by('index')
        serializer = PartitionSerializer(partitions, many=True)
        data['partitions'] = serializer.data
        for item in data['partitions']:
            if item['including_task']:
                task = PartitionTask.objects.get(partition_id=item['partition_id'])
                serializer_task = PartitionTaskSerializer(task)
                item['task'] = serializer_task.data
        return data

    def list(self, user_id):
        """Return serialized courses the user is enrolled in, each annotated
        with a 'toggle' flag mirroring the UserCourse.liked field."""
        courses_ids = UserCourse.objects.filter(user_id=user_id).values_list('course_id')
        courses = Course.objects.filter(course_id__in=courses_ids)
        serializer = CourseSerializer(courses, many=True)
        data = serializer.data
        for item in data:
            liked = UserCourse.objects.get(user_id=user_id, course_id=item['course_id']).liked
            item['toggle'] = liked
        return data

    def retrieve(self, course_id):
        """Return one serialized course with its partitions appended."""
        course = Course.objects.get(course_id=course_id)
        serializer = CourseSerializer(course)
        data = serializer.data
        data = self.append_partitions(course_id, data)
        return data

    def toggle(self, user_id, course_id):
        """Flip the 'liked' flag on the user's course membership."""
        course = UserCourse.objects.get(user_id=user_id, course_id=course_id)
        # Idiomatic boolean flip (was: `False if course.liked else True`).
        course.liked = not course.liked
        course.save()
        return {"message": "OK"}

    def amount(self, user_id):
        """Return how many courses the user is enrolled in."""
        courses = self.list(user_id)
        return {"courses": len(courses)}

    def random(self, user_id):
        """Return a random course_id for the user, or None if they have no
        courses.  (`random` in the body resolves to the module-level import,
        not this method.)"""
        user_desks = UserCourse.objects.filter(
            user_id=user_id
        )
        if len(user_desks) == 0:
            return {'course_id': None}
        return {'course_id': random.choice(user_desks).course_id}
course_service = CourseService()
| 1,676 | -1 | 184 |
1dd066a74be8c7882c1f9c99223ba4a076b3de2d | 1,973 | py | Python | breeds/migrations/0002_auto_20180430_1118.py | rocity/simple-endpoints | 6754a7353dd6b71f19276b67467297e951129a45 | [
"MIT"
] | null | null | null | breeds/migrations/0002_auto_20180430_1118.py | rocity/simple-endpoints | 6754a7353dd6b71f19276b67467297e951129a45 | [
"MIT"
] | null | null | null | breeds/migrations/0002_auto_20180430_1118.py | rocity/simple-endpoints | 6754a7353dd6b71f19276b67467297e951129a45 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.4 on 2018-04-30 11:18
from django.db import migrations, models
import django.utils.timezone
| 37.942308 | 114 | 0.562088 | # Generated by Django 2.0.4 on 2018-04-30 11:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.4: creates the Anime and Website models
    # and adds created/modified audit timestamps to the existing Breed model.

    dependencies = [
        ('breeds', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Anime',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('director', models.CharField(max_length=255)),
                ('release_date', models.DateField()),
                ('score', models.PositiveSmallIntegerField(default=0)),
                ('picture', models.ImageField(upload_to='')),
                ('date_modified', models.DateTimeField(auto_now=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Website',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('link', models.URLField()),
                ('owner', models.CharField(max_length=255)),
                ('logo', models.ImageField(upload_to='')),
                ('date_modified', models.DateTimeField(auto_now=True)),
                ('date_created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.AddField(
            model_name='breed',
            name='date_created',
            # auto_now_add needs a one-off default to backfill existing rows;
            # preserve_default=False keeps it out of the final field state.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='breed',
            name='date_modified',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| 0 | 1,830 | 23 |
510eda9630efc129fe5bb152e8a858ee54a8d0b9 | 2,365 | py | Python | build.py | rpokemon/ideasforpokemon | b135b3f78a77abaf533b38c4eec75509b1a4a152 | [
"MIT"
] | null | null | null | build.py | rpokemon/ideasforpokemon | b135b3f78a77abaf533b38c4eec75509b1a4a152 | [
"MIT"
] | null | null | null | build.py | rpokemon/ideasforpokemon | b135b3f78a77abaf533b38c4eec75509b1a4a152 | [
"MIT"
] | null | null | null | #! python3
import datetime
import json
from csscompressor import compress
# Run Python script
if __name__ == '__main__':
main()
| 35.298507 | 164 | 0.586469 | #! python3
import datetime
import json
from csscompressor import compress
def main():
    """Compile and minify the project's CSS bundle.

    Reads config.json, increments its build counter, concatenates every
    configured CSS source file under a generated header comment, then writes
    the unminified and minified outputs plus the updated config back to disk.
    """
    # Open config file
    with open("config.json", 'r') as config_file:
        config = json.load(config_file)
    # Increment the build counter
    config["build"] += 1
    # Print starting script message
    print("\n{} CSS; build #{}\n\nStarting...\n".format(config["name"], config["build"]))
    # Generate the CSS file header comment
    compiled_css = ""
    author_comment = "/*\n\tStylesheet for {}; build #{}\n\tAuthor{}: {}\n\tBuild Date: {}\n*/\n\n".format(
        config["name"],
        config["build"],
        "s" if len(config["authors"]) > 1 else "",
        # "A, B & C"-style author list
        " & ".join(", ".join(config["authors"]).rsplit(', ', 1)),
        datetime.datetime.utcnow().strftime("%m/%d/%Y @ %H:%M UTC")
    )
    print("Reading from CSS files:")
    # Concatenate the individual CSS files.  (Loop variable renamed from
    # `file`, which shadowed the builtin.)
    for css_name in config["files"]:
        try:
            with open('{}/'.format(config["css_directory"]) + css_name, 'r') as css_file:
                # Add css file to final CSS file
                compiled_css += "/* {}\n------------------------------------------------------------------------------ */\n".format(css_name) + css_file.read() + "\n\n"
                print("\t Succesfully added \"{}/{}\"".format(config["css_directory"], css_name))
        # Print error if file not found
        except FileNotFoundError:
            print("\tError reading \"{}/{}\": File not Found".format(config["css_directory"], css_name))
    # Write css to file.  (Removed redundant close() calls that were inside
    # the `with` blocks; the context manager already closes the file.)
    with open(config["unminified_file"], 'w') as output_file:
        output_file.write(author_comment + compiled_css)
    # Write minified css to file
    with open(config["minified_file"], 'w') as output_file:
        output_file.write(author_comment + compress(compiled_css))
    # Write updated config (with bumped build counter) back to file
    with open("config.json", 'w') as config_file:
        config_file.write(json.dumps(config, sort_keys=True, indent=4))
    # Print confirming CSS file was succesfully generated
    print("\nSuccesfully generated css files!")
    print("\tunminified: {}, minified: {}".format(config["unminified_file"], config["minified_file"]))
# Run Python script
if __name__ == '__main__':
main()
| 2,209 | 0 | 23 |
976ac13a0d17f78cc33df43ffbf0a73639846a80 | 1,783 | py | Python | s3dis_viz.py | zghera/pvcnn-tf | 9aaae991d8117736d0cb260bb525cf5e90c93f21 | [
"MIT"
] | null | null | null | s3dis_viz.py | zghera/pvcnn-tf | 9aaae991d8117736d0cb260bb525cf5e90c93f21 | [
"MIT"
] | null | null | null | s3dis_viz.py | zghera/pvcnn-tf | 9aaae991d8117736d0cb260bb525cf5e90c93f21 | [
"MIT"
] | null | null | null | """Demo to visualize data pipeline output."""
import matplotlib.pyplot as plt
import tensorflow as tf
from dataloaders.s3dis import create_s3dis_dataset
if __name__ == "__main__":
main(create_pointcloud_dump=False)
| 25.84058 | 73 | 0.614133 | """Demo to visualize data pipeline output."""
import matplotlib.pyplot as plt
import tensorflow as tf
from dataloaders.s3dis import create_s3dis_dataset
def main(create_pointcloud_dump: bool) -> None:
    """Plot one S3DIS validation sample as a 3-D scatter, colored by class.

    Args:
        create_pointcloud_dump: when True, also dump the plotted points to
            ``scene-data.txt`` as one ``x y z`` row per point.
    """
    # S3DIS semantic classes, indexed by label id.
    objects = {
        0: "clutter",
        1: "ceiling",
        2: "floor",
        3: "wall",
        4: "beam",
        5: "column",
        6: "door",
        7: "window",
        8: "table",
        9: "chair",
        10: "sofa",
        11: "bookcase",
        12: "board",
    }
    # Validation split (holdout area 5), one sample per batch.
    dataset, _ = create_s3dis_dataset(
        "./data/s3dis/pointcnn/",
        shuffle_size=1,
        batch_size=1,
        num_points=10000,
        use_normalized_coords=False,
        holdout_area=5,
        is_train_split=False,
        is_deterministic=False,
        num_classes=13,
        seed=1,
    )
    # Take one batch and squeeze away the batch dimension.
    x, y = tuple(tf.squeeze(tensor) for tensor in next(iter(dataset)))
    # Keep the first 3 feature rows as x/y/z coordinates; assumes features
    # are channels-first -- TODO confirm against the dataloader.
    x = x[:3, :]
    # Collapse the per-class label rows to one class id per point.
    y = tf.argmax(y, axis=0)
    print(f"sample shape = {x.shape} | label shape = {y.shape}")
    fig = plt.figure()
    ax = fig.add_subplot(projection="3d")
    if create_pointcloud_dump:
        # Truncate any previous dump before appending per-class below.
        open("scene-data.txt", "w", encoding="UTF-8").close()
    for i, item_name in objects.items():
        if i <= 3:
            # Skip clutter/ceiling/floor/wall so they don't dominate the plot.
            continue
        # Boolean mask selecting the points that belong to class `i`.
        mask = tf.equal(y, tf.cast(tf.fill([y.shape[0]], i), dtype=tf.int64))
        cur_x = tf.boolean_mask(x, mask, axis=1)
        print(item_name, cur_x.shape)
        if cur_x.shape[1] > 0:
            ax.scatter(cur_x[0, :], cur_x[1, :], cur_x[2, :], label=item_name)
            if create_pointcloud_dump:
                with open("scene-data.txt", "a", encoding="UTF-8") as fp:
                    for j in range(cur_x.shape[1]):
                        print(f"{cur_x[0,j]} {cur_x[1,j]} {cur_x[2,j]}", file=fp)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.legend()
    plt.show()
    # plt.savefig("s3dis-data-pipeline-output.png") # Use for WSL dev
if __name__ == "__main__":
main(create_pointcloud_dump=False)
| 1,540 | 0 | 23 |
5f0c939440824c40818d4392a6861bfb95e0b707 | 636 | py | Python | heron/dsl/src/python/__init__.py | zhengyangtean/CG4001_Heron_ElasticBolt | e471593c265088996d485e2dd11c9f2484876568 | [
"Apache-2.0"
] | 1 | 2017-11-06T08:23:43.000Z | 2017-11-06T08:23:43.000Z | heron/dsl/src/python/__init__.py | zhengyangtean/CG4001_Heron_ElasticBolt | e471593c265088996d485e2dd11c9f2484876568 | [
"Apache-2.0"
] | null | null | null | heron/dsl/src/python/__init__.py | zhengyangtean/CG4001_Heron_ElasticBolt | e471593c265088996d485e2dd11c9f2484876568 | [
"Apache-2.0"
] | null | null | null | """
The top-level library for Heron's Python DSL, which enables you to write Heron
[topologies](https://twitter.github.io/heron/docs/concepts/topologies/) in
a Python DSL.
Heron topologies are acyclic graphs used to process streaming data. Topologies
have two major components:
[spouts](spout/spout.m.html#heron_py.spout.spout.Spout) pull data into the
topology and then [emit](spout/spout.m.html#heron_py.spout.spout.Spout.emit)
that data as tuples (lists in Python) to
[bolts](bolt/bolt.m.html#heron_py.bolt.bolt.Bolt) that process that data.
"""
# Load basic dsl modules
from .streamlet import Streamlet, OperationType, TimeWindow
| 39.75 | 78 | 0.784591 | """
The top-level library for Heron's Python DSL, which enables you to write Heron
[topologies](https://twitter.github.io/heron/docs/concepts/topologies/) in
a Python DSL.
Heron topologies are acyclic graphs used to process streaming data. Topologies
have two major components:
[spouts](spout/spout.m.html#heron_py.spout.spout.Spout) pull data into the
topology and then [emit](spout/spout.m.html#heron_py.spout.spout.Spout.emit)
that data as tuples (lists in Python) to
[bolts](bolt/bolt.m.html#heron_py.bolt.bolt.Bolt) that process that data.
"""
# Load basic dsl modules
from .streamlet import Streamlet, OperationType, TimeWindow
| 0 | 0 | 0 |
b5b8d9b15a585714c65998304ca44e6f5f5f48fc | 3,214 | py | Python | train_mask_detector.py | ali-commits/pyhtonML-FaceMaskDetection | 506294ba97786b2eb9c63936d9078eca4d5b4707 | [
"MIT"
] | null | null | null | train_mask_detector.py | ali-commits/pyhtonML-FaceMaskDetection | 506294ba97786b2eb9c63936d9078eca4d5b4707 | [
"MIT"
] | null | null | null | train_mask_detector.py | ali-commits/pyhtonML-FaceMaskDetection | 506294ba97786b2eb9c63936d9078eca4d5b4707 | [
"MIT"
] | null | null | null | from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os

# Training hyper-parameters: initial learning rate, epoch count, batch size.
INIT_LR = 1e-4
EPOCHS = 5
BS = 32
# Each sub-directory of `dataset` is one class.
DIRECTORY = "dataset"
CATEGORIES = os.listdir(DIRECTORY)  # ["with_mask", "without_mask"]
print("[INFO] loading images...")
data = []
labels = []
# Preprocessing: load every image at 224x224 and apply MobileNetV2 scaling.
for category in CATEGORIES:
    path = os.path.join(DIRECTORY, category)
    for img_name in os.listdir(path):
        img_path = os.path.join(path, img_name)
        image = load_img(img_path, target_size=(224, 224))
        image = img_to_array(image)
        image = preprocess_input(image)
        data.append(image)
        labels.append(category)
# Encode class names as one-hot vectors.
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
data = np.array(data, dtype="float32")
labels = np.array(labels)
# Stratified 80/20 train/test split (fixed seed for reproducibility).
trainX, testX, trainY, testY = train_test_split(
    data, labels, test_size=0.2, stratify=labels, random_state=42
)
# trainX = data
# testX = data
# trainY = labels
# testY = labels
# Learning: augment the training images on the fly.
aug = ImageDataGenerator(
    rotation_range=30,
    zoom_range=0.10,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.10,
    horizontal_flip=True,
    fill_mode="nearest",
)
# Transfer learning: ImageNet-pretrained MobileNetV2 without its classifier.
baseModel = MobileNetV2(
    weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3))
)
# New 2-class classification head on top of the base network.
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)
# Freeze the pretrained backbone; only the new head is trained.
for layer in baseModel.layers:
    layer.trainable = False
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
print("[INFO] training head")
H = model.fit(
    aug.flow(trainX, trainY, batch_size=BS),
    steps_per_epoch=len(trainX) // BS,
    validation_data=(testX, testY),
    validation_steps=len(testX) // BS,
    epochs=EPOCHS,
)
print("[INFO] evaluating network")
# Per-sample predicted class index from the softmax output.
predIdxs = model.predict(testX, batch_size=BS)
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=lb.classes_))
print("[INFO] saving mask detector model...")
model.save("mask_detector.model", save_format="h5")
| 28.442478 | 86 | 0.756067 | from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
INIT_LR = 1e-4
EPOCHS = 5
BS = 32
DIRECTORY = "dataset"
CATEGORIES = os.listdir(DIRECTORY) # ["with_mask", "without_mask"]
print("[INFO] loading images...")
data = []
labels = []
# Preprocessing
for category in CATEGORIES:
path = os.path.join(DIRECTORY, category)
for img_name in os.listdir(path):
img_path = os.path.join(path, img_name)
image = load_img(img_path, target_size=(224, 224))
image = img_to_array(image)
image = preprocess_input(image)
data.append(image)
labels.append(category)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
data = np.array(data, dtype="float32")
labels = np.array(labels)
trainX, testX, trainY, testY = train_test_split(
data, labels, test_size=0.2, stratify=labels, random_state=42
)
# trainX = data
# testX = data
# trainY = labels
# testY = labels
# Learning
aug = ImageDataGenerator(
rotation_range=30,
zoom_range=0.10,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.10,
horizontal_flip=True,
fill_mode="nearest",
)
baseModel = MobileNetV2(
weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3))
)
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)
for layer in baseModel.layers:
layer.trainable = False
print("[INFO] compiling model...")
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
print("[INFO] training head")
H = model.fit(
aug.flow(trainX, trainY, batch_size=BS),
steps_per_epoch=len(trainX) // BS,
validation_data=(testX, testY),
validation_steps=len(testX) // BS,
epochs=EPOCHS,
)
print("[INFO] evaluating network")
predIdxs = model.predict(testX, batch_size=BS)
predIdxs = np.argmax(predIdxs, axis=1)
print(classification_report(testY.argmax(axis=1), predIdxs, target_names=lb.classes_))
print("[INFO] saving mask detector model...")
model.save("mask_detector.model", save_format="h5")
| 0 | 0 | 0 |
e22a9c687b78795ce4ef99ea128b7372e86e57eb | 505 | py | Python | api/test/test_statics_inhabitants.py | Viet2503/rki-vaccination-data | a40ff7be7e55850f5184cebc3cc1049541c87282 | [
"MIT"
] | 27 | 2020-12-30T06:47:23.000Z | 2021-12-05T14:14:53.000Z | api/test/test_statics_inhabitants.py | rphl/rki-vaccination-data | 360f1456613bfd2f0e6199016dde3941ed6f234a | [
"MIT"
] | 56 | 2020-12-31T09:00:57.000Z | 2022-01-19T18:17:15.000Z | api/test/test_statics_inhabitants.py | rphl/rki-vaccination-data | 360f1456613bfd2f0e6199016dde3941ed6f234a | [
"MIT"
] | 7 | 2020-12-30T13:39:58.000Z | 2021-11-21T13:52:53.000Z | """ Unittest """
import unittest
# pylint: disable=import-error
from _utils.statics import inhabitants
class TestModuleStaticsInhabitants(unittest.TestCase):
    """ Test Module for statics.inhabitants """

    def test_total(self):
        """ Test for inhabitants.TOTAL """
        # assertIsInstance replaces the fragile type-name string comparison
        # and produces a clearer failure message.
        self.assertIsInstance(inhabitants.TOTAL, int)

    def test_states(self):
        """ Test for inhabitants.STATES """
        self.assertIsInstance(inhabitants.STATES, dict)
if __name__ == '__main__':
unittest.main()
| 28.055556 | 62 | 0.724752 | """ Unittest """
import unittest
# pylint: disable=import-error
from _utils.statics import inhabitants
class TestModuleStaticsInhabitants(unittest.TestCase):
""" Test Module for statics.inhabitants """
def test_total(self):
""" Test for inhabitants.TOTAL """
self.assertEqual(type(inhabitants.TOTAL).__name__, "int")
def test_states(self):
""" Test for inhabitants.STATES """
self.assertEqual(type(inhabitants.STATES).__name__,"dict")
if __name__ == '__main__':
unittest.main()
| 0 | 0 | 0 |
a102ca24a694ce5cda55ec16de27a47047d2234b | 21,181 | py | Python | nestedsulcusfeatures_HBM2011/register_to_template.py | binarybottle/nestedsulcusfeatures_HBM2011 | 4a1e064316b73c268c49383a34f49baca23cef93 | [
"Apache-2.0"
] | null | null | null | nestedsulcusfeatures_HBM2011/register_to_template.py | binarybottle/nestedsulcusfeatures_HBM2011 | 4a1e064316b73c268c49383a34f49baca23cef93 | [
"Apache-2.0"
] | null | null | null | nestedsulcusfeatures_HBM2011/register_to_template.py | binarybottle/nestedsulcusfeatures_HBM2011 | 4a1e064316b73c268c49383a34f49baca23cef93 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""
Register brains, landmarks, and labels to a template.
(c) 2011, @rno klein
"""
import os
from os.path import exists
from subprocess import call
from numpy import float, isnan
# Run intensity-based registration
# 1. Register brains to template
# 2. Transform brains to each other via template
# 3. Transform landmarks to template
register_to_template = 1
transform_pairs_via_template = 1
transform_landmarks_to_template = 0
# Run landmark-driven registration to template:
register_landmarks_to_template = 0
transform_landmarks_via_template = 0
# Atlas-based evaluation for the above settings:
# 1. prepare target atlas mask
# 2. transform source atlas
# 3. fill #1 with #2
# 4. measure overlap of #3 with target atlas labels
prepare_target_mask = 0
evaluate_with_atlases = 1
verbose = 1
dim = 3
#
# Files
#
source_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
target_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
#source_files = ['m1','m2','m3','m4']#,'m5','m6']
#target_files = ['m1','m2','m3','m4']#,'m5','m6']
ANTSPATH = os.environ.get("ANTSPATH")
FSLPATH = '/usr/local/fsl/bin/'
out_path = '/hd2/Archive/registration_evaluation_2011_output/'
xfm_dir = os.path.join( out_path, 'Transforms/')
xfm_brain_dir = os.path.join( out_path, 'Transformed_Brains/')
xfm_landmarks_dir = os.path.join( out_path, 'Transformed_Landmarks/')
xfm_atlas_dir = os.path.join( out_path, 'Transformed_Atlases/')
atlas_dir = '/hd2/Brains/CUMC12/Atlases/'
brain_dir = '/hd2/Brains/CUMC12/Brains/'
brainmask_dir = '/hd2/Brains/CUMC12/BrainMasks/'
ext = '.nii.gz'
template = '/hd2/Brains/CUMC12/CUMC12template.nii.gz'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_kiho_im_binary/'
landmark_type = 'pits_kiho_im'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_yrjo_hame_binary/'
landmark_type = 'pits_yrjo_hame'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_forrest_bao_binary/'
landmark_type = 'pits_forrest_bao'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/ribbons_brain_visa_binary/'
landmark_type = 'ribbons_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_gang_li_binary/'
landmark_type = 'fundi_gang_li'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_brain_visa_binary/'
landmark_type = 'fundi_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_forrest_bao_binary/'
landmark_type = 'fundi_forrest_bao'
results_dir = os.path.join( out_path, 'Results/')
label_file = 'CUMC12_labels_regions.txt'
#
# Registration parameters
#
gradient_step_size = 0.5
iterations = "30x100x10"
options = " --use-Histogram-Matching"
initialize = " --number-of-affine-iterations 10000x10000x10000x10000x10000"
warp = ANTSPATH + "ANTS " + str(dim) + " -t SyN[" + str(gradient_step_size) +"] -i " + \
str(iterations) + options + initialize
apply_warp = ANTSPATH + "WarpImageMultiTransform " + str(dim)
#
# Regularization parameters
#
regularizer = "Gauss"
regularizer_setting = 3
deformation_field_sigma = 0
regularize = "-r Gauss[" + str(regularizer_setting) + ", " + \
str(deformation_field_sigma) + "]"
#
# Intensity parameters
#
intensity_measure = "CC"
intensity_weight = 1.0
intensity_setting = 3
#
# Landmark parameters
#
landmark_measure1 = "PSE"
landmark_measure2 = "MSQ"
landmark_weight1 = 0.1
landmark_weight2 = 0.1
percent = 1.0 # real number: 1.0 = 100%
boundary = 0 # 0: not only boundaries
sigma = 10
neighbor = 100
matching_iter = 100000 # partial matching iterations
if evaluate_with_atlases:
f = open(label_file,'r')
label_table = f.readlines()
f.close()
labels = []
for row in label_table:
labels.append(int(row.split()[0]))
#------------------------------------------
# Register brains and landmarks to template
#------------------------------------------
if register_to_template + transform_landmarks_to_template + \
prepare_target_mask > 0:
for file in source_files:
source = brain_dir+file+ext
output = xfm_dir+file+'_to_template'
out = '-o ' + output+ext
if os.path.exists(source) and os.path.exists(template) and os.path.exists(xfm_dir):
# Intensity-based registration to template:
if register_to_template:
intensity = [template, source, intensity_weight, intensity_setting]
intensity = "-m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
args = " ".join([warp, regularize, intensity, out])
if verbose: print(args); print(''); p = call(args, shell="True")
# Prepare binary (target atlas) masks for filling with labels:
if prepare_target_mask:
args = " ".join(['c3d', atlas_dir+file+ext, '-binarize -o', brainmask_dir+file+ext])
if verbose: print(args); print(''); p = call(args, shell="True")
# Transform landmarks to template space:
if transform_landmarks_to_template:
source_landmarks = landmarks_dir+file+ext
output_landmarks = xfm_landmarks_dir+file+'_to_template_'+landmark_type+ext
try:
os.path.exists(source_landmarks) and os.path.exists(xfm_landmarks_dir)
except:
raise NameError('Check ' + source_landmarks + ' and ' + xfm_landmarks_dir)
args = " ".join([apply_warp, source_landmarks, output_landmarks, \
'-R', template, output+'Warp'+ext, output+'Affine.txt', '--use-NN'])
if verbose: print(args); print(''); p = call(args, shell="True")
else:
if not os.path.exists(source):
raise NameError('Check input file ' + source)
elif not os.path.exists(template):
raise NameError('Check input file ' + template)
elif not os.path.exists(xfm_dir):
raise NameError('Check input file ' + xfm_dir)
#--------------------------------------------------------------
# Register landmarks to transformed landmarks in template space
#--------------------------------------------------------------
if register_landmarks_to_template:
for file in source_files:
source = brain_dir+file+ext
source_landmarks = landmarks_dir+file+ext
for file2 in target_files:
if file2 != file:
template_landmarks = xfm_landmarks_dir+file2+'_to_template_'+landmark_type+ext
output_xfm = xfm_dir+file+'_to_'+file2+'_in_template_space_'+landmark_type+ext
if os.path.exists(source) and os.path.exists(template) and \
os.path.exists(source_landmarks) and os.path.exists(template_landmarks):
# Intensity similarity:
intensity = [template, source, intensity_weight, intensity_setting]
intensity = " -m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
# Landmark similarity:
lm_args1 = [template, source, template_landmarks, source_landmarks,
landmark_weight1, percent, sigma, boundary, neighbor, matching_iter]
landmarks1 = ", ".join([" -m PSE[" + ", ".join([str(s) for s in lm_args1]) + "]"])
lm_args2 = [template_landmarks, source_landmarks, landmark_weight2, 0]
landmarks2 = " ".join([" -m MSQ[" + ", ".join([str(s) for s in lm_args2]) + "]"])
#
# Run command
#
args = " ".join([warp, '-o', output_xfm, regularize, intensity, landmarks1, landmarks2])
if verbose: print(args); print(''); p = call(args, shell="True")
else:
if not os.path.exists(source):
raise NameError('Check input file ' + source)
elif not os.path.exists(template):
raise NameError('Check input file ' + template)
elif not os.path.exists(source_landmarks):
raise NameError('Check input file ' + source_landmarks)
elif not os.path.exists(template_landmarks):
raise NameError('Check input file ' + template_landmarks)
#----------------------------------------------
# Apply intensity-based registration transforms
# to register brains to each other via template
#----------------------------------------------
# For every ordered pair (file -> file2), compose the two template
# registrations (file -> template -> file2), then optionally evaluate the
# result by warping file's atlas onto file2 and measuring per-label
# Dice/Jaccard overlap with c3d.
# NOTE(review): the WarpImageMultiTransform/ImageMath invocations below are
# commented out, so as written this pass only re-runs the overlap
# measurements on previously transformed atlases.
if transform_pairs_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps.txt'
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                if os.path.exists(brain_dir+file+ext) and \
                   os.path.exists(brain_dir+file2+ext) and \
                   os.path.exists(xfm_dir+file+'_to_templateWarp.nii.gz'):
                    output_stem = file + '_to_' + file2
                    # Transform brains
                    args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                        source, xfm_brain_dir+output_stem+ext, '-R',target, \
                        '-i', xfm_dir+file2+'_to_templateAffine.txt', \
                        xfm_dir+file2+'_to_templateInverseWarp.nii.gz', \
                        xfm_dir+file+'_to_templateWarp.nii.gz', \
                        xfm_dir+file+'_to_templateAffine.txt'])
                    #if verbose: print(args); print(''); p = call(args, shell="True")
                    if evaluate_with_atlases:
                        # Transform atlases
                        source_labels = atlas_dir+file+ext
                        target_labels = atlas_dir+file2+ext
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels, \
                            '-i', xfm_dir+file2+'_to_templateAffine.txt', \
                            xfm_dir+file2+'_to_templateInverseWarp.nii.gz', \
                            xfm_dir+file+'_to_templateWarp.nii.gz', \
                            xfm_dir+file+'_to_templateAffine.txt','--use-NN'])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Fill target atlas mask with transformed source atlas labels
                        args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext, \
                            'PropagateLabelsThroughMask', brainmask_dir+file2+ext, \
                            xfm_atlas_dir+output_stem+ext])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Measure overlap of target atlas and transformed source atlas labels
                        results_file = results_dir+output_stem+'.txt'
                        f_eval = open(results_file, 'w');
                        average_dice = 0
                        average_jacc = 0
                        print(results_file)
                        for label in labels:
                            # c3d's '-overlap' report is redirected to a
                            # scratch file and parsed below.
                            args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext, \
                                atlas_dir+file2+ext, '-overlap', str(label), \
                                '>'+results_dir+'temp_overlap.txt'])
                            p = call(args, shell="True")
                            # NOTE(review): this read handle is never closed.
                            f = open(results_dir+'temp_overlap.txt','r')
                            temp = f.read()
                            if temp != '':
                                # The last two whitespace tokens carry the
                                # Dice and Jaccard values (trailing commas
                                # stripped by the inner split).
                                dice = float(temp.split()[-2].split(',')[0])
                                jacc = float(temp.split()[-1].split(',')[0])
                            else:
                                dice = 0.0
                                jacc = 0.0
                            if isnan(dice):
                                dice = 0.0
                            if isnan(jacc):
                                jacc = 0.0
                            print_out = ' '.join(['Label:', str(label), 'Dice:', str(dice), \
                                'Jaccard:', str(jacc)])
                            print(print_out)
                            # Close/reopen in append mode so each label's
                            # line is flushed to disk immediately.
                            f_eval.close()
                            f_eval = open(results_file, 'a')
                            f_eval.write(print_out + '\n')
                            average_dice += dice
                            average_jacc += jacc
                        average_dice = average_dice/len(labels)
                        average_jacc = average_jacc/len(labels)
                        print_out1 = 'Average Dice: ' + str(average_dice)
                        print_out2 = 'Average Jacc: ' + str(average_jacc)
                        print(print_out1);
                        print(print_out2)
                        f_eval.close()
                        f_eval = open(results_file, 'a')
                        f_eval.write(print_out1 + '\n' + print_out2 + '\n')
                        f_eval.close()
                        # Same flush-by-reopen pattern for the summary file.
                        f_avg.close()
                        f_avg = open(avg_results_file, 'a');
                        f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    # Report which required input is missing.
                    # NOTE(review): the last check tests
                    # xfm_dir+file+'Warp.nii.gz', but the guard above requires
                    # xfm_dir+file+'_to_templateWarp.nii.gz' -- confirm which
                    # filename is intended.
                    if not os.path.exists(brain_dir+file+ext):
                        raise NameError('Check input file ' + brain_dir+file+ext)
                    elif not os.path.exists(brain_dir+file2+ext):
                        raise NameError('Check input file ' + brain_dir+file2+ext)
                    elif not os.path.exists(xfm_dir+file+'Warp.nii.gz'):
                        raise NameError('Check input file ' + xfm_dir+file+'Warp.nii.gz')
    if evaluate_with_atlases:
        f_avg.close()
#----------------------------------------------
# Apply landmark-driven registration transforms
# to register brains to each other via template
#----------------------------------------------
# Same pairwise composition as the intensity section above, but using the
# landmark-driven transforms computed in template space
# ("..._in_template_space_<landmark_type>").  Steps whose output file
# already exists are skipped, so the pass is resumable.
if transform_landmarks_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps_'+landmark_type+'.txt'
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        source_landmarks = landmarks_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                target_landmarks = landmarks_dir+file2+ext
                if os.path.exists(source) and \
                   os.path.exists(target) and \
                   os.path.exists(source_landmarks) and \
                   os.path.exists(target_landmarks):
                    pair = file+'_to_'+file2
                    inv_pair = file2+'_to_'+file
                    output_stem = pair+'_'+landmark_type
                    xfm_stem = xfm_dir+pair+'_in_template_space_'+landmark_type
                    inv_xfm_stem = xfm_dir+inv_pair+'_in_template_space_'+landmark_type
                    # Transform brains
                    if not os.path.exists(xfm_brain_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source, xfm_brain_dir+output_stem+ext, '-R', target, \
                            '-i', inv_xfm_stem+'Affine.txt', \
                            inv_xfm_stem+'InverseWarp.nii.gz', \
                            xfm_stem+'Warp.nii.gz', \
                            xfm_stem+'Affine.txt'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    # Transform landmarks
                    if not os.path.exists(xfm_landmarks_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source_landmarks, xfm_landmarks_dir+output_stem+ext, '-R',target_landmarks, \
                            '-i', inv_xfm_stem+'Affine.txt', \
                            inv_xfm_stem+'InverseWarp.nii.gz', \
                            xfm_stem+'Warp.nii.gz', \
                            xfm_stem+'Affine.txt','--use-NN'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    if evaluate_with_atlases:
                        # Skip pairs whose transformed atlas or per-pair
                        # results file already exists (resume support).
                        if not os.path.exists(xfm_atlas_dir+output_stem+ext):
                            if not os.path.exists(results_dir+output_stem+'.txt'):
                                # Transform atlases
                                source_labels = atlas_dir+file+ext
                                target_labels = atlas_dir+file2+ext
                                args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                                    source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels, \
                                    '-i', inv_xfm_stem+'Affine.txt', \
                                    inv_xfm_stem+'InverseWarp.nii.gz', \
                                    xfm_stem+'Warp.nii.gz', \
                                    xfm_stem+'Affine.txt','--use-NN'])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Fill target atlas mask with transformed source atlas labels
                                args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext, \
                                    'PropagateLabelsThroughMask', brainmask_dir+file2+ext, \
                                    xfm_atlas_dir+output_stem+ext])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Measure overlap of target atlas and transformed source atlas labels
                                results_file = results_dir+output_stem+'.txt'
                                f_eval = open(results_file, 'w');
                                average_dice = 0
                                average_jacc = 0
                                for label in labels:
                                    args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext, \
                                        atlas_dir+file2+ext, '-overlap', str(label), \
                                        '>'+results_dir+'temp_overlap.txt'])
                                    p = call(args, shell="True")
                                    # NOTE(review): this read handle is never closed.
                                    f = open(results_dir+'temp_overlap.txt','r')
                                    temp = f.read()
                                    dice = 0
                                    jacc = 0
                                    if temp != '':
                                        dice = float(temp.split()[-2].split(',')[0])
                                        jacc = float(temp.split()[-1].split(',')[0])
                                    print_out = " ".join(['Label:', str(label), 'Dice:', str(dice), \
                                        'Jaccard:', str(jacc)])
                                    print(print_out)
                                    # Close/reopen in append mode so each
                                    # line is flushed to disk immediately.
                                    f_eval.close()
                                    f_eval = open(results_file, 'a')
                                    f_eval.write(print_out + '\n')
                                    # NOTE(review): unlike the intensity
                                    # section, NaNs are zeroed only after the
                                    # per-label line is written.
                                    if isnan(dice):
                                        dice = 0
                                    if isnan(jacc):
                                        jacc = 0
                                    average_dice += dice
                                    average_jacc += jacc
                                average_dice = average_dice/len(labels)
                                average_jacc = average_jacc/len(labels)
                                print_out1 = 'Average Dice: ' + str(average_dice)
                                print_out2 = 'Average Jacc: ' + str(average_jacc)
                                print(print_out1);
                                print(print_out2)
                                f_eval.close()
                                f_eval = open(results_file, 'a')
                                f_eval.write('\n' + print_out1 + '\n' + print_out2 + '\n\n')
                                f_eval.close()
                                f_avg.close()
                                f_avg = open(avg_results_file, 'a');
                                f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    # Report which required landmark file is missing.
                    # NOTE(review): the guard above also checks the two brain
                    # images, but only the landmark files are reported here.
                    if not os.path.exists(source_landmarks):
                        raise NameError('Check input file ' + source_landmarks)
                    elif not os.path.exists(target_landmarks):
                        raise NameError('Check input file ' + target_landmarks)
    if evaluate_with_atlases:
        f_avg.close()
| 50.672249 | 108 | 0.507153 | #!/usr/bin/python
"""
Register brains, landmarks, and labels to a template.
(c) 2011, @rno klein
"""
import os
from os.path import exists
from subprocess import call
from numpy import float, isnan
# Run intensity-based registration
# 1. Register brains to template
# 2. Transform brains to each other via template
# 3. Transform landmarks to template
register_to_template = 1
transform_pairs_via_template = 1
transform_landmarks_to_template = 0
# Run landmark-driven registration to template:
register_landmarks_to_template = 0
transform_landmarks_via_template = 0
# Atlas-based evaluation for the above settings:
# 1. prepare target atlas mask
# 2. transform source atlas
# 3. fill #1 with #2
# 4. measure overlap of #3 with target atlas labels
prepare_target_mask = 0
evaluate_with_atlases = 1
verbose = 1
dim = 3
#
# Files
#
source_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
target_files = ['m1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12']
#source_files = ['m1','m2','m3','m4']#,'m5','m6']
#target_files = ['m1','m2','m3','m4']#,'m5','m6']
# NOTE(review): os.environ.get returns None when ANTSPATH is unset, which
# would make the string concatenations below (warp, apply_warp) fail.
ANTSPATH = os.environ.get("ANTSPATH")
FSLPATH = '/usr/local/fsl/bin/'
out_path = '/hd2/Archive/registration_evaluation_2011_output/'
xfm_dir = os.path.join( out_path, 'Transforms/')
xfm_brain_dir = os.path.join( out_path, 'Transformed_Brains/')
xfm_landmarks_dir = os.path.join( out_path, 'Transformed_Landmarks/')
xfm_atlas_dir = os.path.join( out_path, 'Transformed_Atlases/')
atlas_dir = '/hd2/Brains/CUMC12/Atlases/'
brain_dir = '/hd2/Brains/CUMC12/Brains/'
brainmask_dir = '/hd2/Brains/CUMC12/BrainMasks/'
ext = '.nii.gz'
template = '/hd2/Brains/CUMC12/CUMC12template.nii.gz'
# Landmark set selection: each assignment below overwrites the previous one,
# so only the LAST pair (fundi_forrest_bao) takes effect.  Edit/reorder to
# choose a different landmark set.
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_kiho_im_binary/'
landmark_type = 'pits_kiho_im'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_yrjo_hame_binary/'
landmark_type = 'pits_yrjo_hame'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/pits_forrest_bao_binary/'
landmark_type = 'pits_forrest_bao'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/ribbons_brain_visa_binary/'
landmark_type = 'ribbons_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_gang_li_binary/'
landmark_type = 'fundi_gang_li'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_brain_visa_binary/'
landmark_type = 'fundi_brain_visa'
landmarks_dir = '/hd2/Brains/CUMC12/Landmarks/fundi_forrest_bao_binary/'
landmark_type = 'fundi_forrest_bao'
results_dir = os.path.join( out_path, 'Results/')
label_file = 'CUMC12_labels_regions.txt'
#
# Registration parameters
#
gradient_step_size = 0.5
iterations = "30x100x10"
options = " --use-Histogram-Matching"
initialize = " --number-of-affine-iterations 10000x10000x10000x10000x10000"
# Base ANTS command (SyN transform); the intensity/landmark similarity terms
# and the output option are appended per invocation.
warp = ANTSPATH + "ANTS " + str(dim) + " -t SyN[" + str(gradient_step_size) +"] -i " + \
    str(iterations) + options + initialize
apply_warp = ANTSPATH + "WarpImageMultiTransform " + str(dim)
#
# Regularization parameters
#
regularizer = "Gauss"
regularizer_setting = 3
deformation_field_sigma = 0
regularize = "-r Gauss[" + str(regularizer_setting) + ", " + \
    str(deformation_field_sigma) + "]"
#
# Intensity parameters
#
intensity_measure = "CC"
intensity_weight = 1.0
intensity_setting = 3
#
# Landmark parameters
#
landmark_measure1 = "PSE"
landmark_measure2 = "MSQ"
landmark_weight1 = 0.1
landmark_weight2 = 0.1
percent = 1.0 # real number: 1.0 = 100%
boundary = 0 # 0: not only boundaries
sigma = 10
neighbor = 100
matching_iter = 100000 # partial matching iterations
# Load the atlas label IDs (first whitespace-separated column of each row).
if evaluate_with_atlases:
    f = open(label_file,'r')
    label_table = f.readlines()
    f.close()
    labels = []
    for row in label_table:
        labels.append(int(row.split()[0]))
#------------------------------------------
# Register brains and landmarks to template
#------------------------------------------
# Runs one or more of the per-subject steps below, depending on which flags
# are enabled.  Note that the external commands are only executed when
# `verbose` is set, because `call` shares the `if verbose:` suite with the
# prints (a pattern used throughout this script).
if register_to_template + transform_landmarks_to_template + \
   prepare_target_mask > 0:
    for file in source_files:
        source = brain_dir+file+ext
        output = xfm_dir+file+'_to_template'
        out = '-o ' + output+ext
        if os.path.exists(source) and os.path.exists(template) and os.path.exists(xfm_dir):
            # Intensity-based registration to template:
            if register_to_template:
                intensity = [template, source, intensity_weight, intensity_setting]
                intensity = "-m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
                args = " ".join([warp, regularize, intensity, out])
                if verbose: print(args); print(''); p = call(args, shell="True")
            # Prepare binary (target atlas) masks for filling with labels:
            if prepare_target_mask:
                args = " ".join(['c3d', atlas_dir+file+ext, '-binarize -o', brainmask_dir+file+ext])
                if verbose: print(args); print(''); p = call(args, shell="True")
            # Transform landmarks to template space:
            if transform_landmarks_to_template:
                source_landmarks = landmarks_dir+file+ext
                output_landmarks = xfm_landmarks_dir+file+'_to_template_'+landmark_type+ext
                # Fix: the original wrapped these checks in try/except, but
                # os.path.exists() returns False instead of raising, so
                # missing files were silently ignored.  Test the result
                # explicitly so the intended NameError actually fires.
                if not (os.path.exists(source_landmarks) and
                        os.path.exists(xfm_landmarks_dir)):
                    raise NameError('Check ' + source_landmarks + ' and ' + xfm_landmarks_dir)
                args = " ".join([apply_warp, source_landmarks, output_landmarks, \
                    '-R', template, output+'Warp'+ext, output+'Affine.txt', '--use-NN'])
                if verbose: print(args); print(''); p = call(args, shell="True")
        else:
            # Report which required input is missing.
            if not os.path.exists(source):
                raise NameError('Check input file ' + source)
            elif not os.path.exists(template):
                raise NameError('Check input file ' + template)
            elif not os.path.exists(xfm_dir):
                raise NameError('Check input file ' + xfm_dir)
#--------------------------------------------------------------
# Register landmarks to transformed landmarks in template space
#--------------------------------------------------------------
# For every ordered pair (file, file2), run ANTS with a combined intensity
# (CC) + landmark (PSE and MSQ) objective, matching file's landmarks to
# file2's landmarks that were previously warped into template space.
if register_landmarks_to_template:
    for file in source_files:
        source = brain_dir+file+ext
        source_landmarks = landmarks_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                template_landmarks = xfm_landmarks_dir+file2+'_to_template_'+landmark_type+ext
                output_xfm = xfm_dir+file+'_to_'+file2+'_in_template_space_'+landmark_type+ext
                if os.path.exists(source) and os.path.exists(template) and \
                   os.path.exists(source_landmarks) and os.path.exists(template_landmarks):
                    # Intensity similarity:
                    intensity = [template, source, intensity_weight, intensity_setting]
                    intensity = " -m "+intensity_measure+"[" + ", ".join([str(s) for s in intensity]) + "]"
                    # Landmark similarity:
                    lm_args1 = [template, source, template_landmarks, source_landmarks,
                                landmark_weight1, percent, sigma, boundary, neighbor, matching_iter]
                    # NOTE(review): joining a single-element list is a no-op;
                    # both joins below just yield the bracketed option string.
                    landmarks1 = ", ".join([" -m PSE[" + ", ".join([str(s) for s in lm_args1]) + "]"])
                    lm_args2 = [template_landmarks, source_landmarks, landmark_weight2, 0]
                    landmarks2 = " ".join([" -m MSQ[" + ", ".join([str(s) for s in lm_args2]) + "]"])
                    #
                    # Run command
                    #
                    args = " ".join([warp, '-o', output_xfm, regularize, intensity, landmarks1, landmarks2])
                    if verbose: print(args); print(''); p = call(args, shell="True")
                else:
                    # Report which required input is missing.
                    if not os.path.exists(source):
                        raise NameError('Check input file ' + source)
                    elif not os.path.exists(template):
                        raise NameError('Check input file ' + template)
                    elif not os.path.exists(source_landmarks):
                        raise NameError('Check input file ' + source_landmarks)
                    elif not os.path.exists(template_landmarks):
                        raise NameError('Check input file ' + template_landmarks)
#----------------------------------------------
# Apply intensity-based registration transforms
# to register brains to each other via template
#----------------------------------------------
# For every ordered pair (file -> file2), compose the two template
# registrations (file -> template -> file2), then optionally evaluate the
# result by warping file's atlas onto file2 and measuring per-label
# Dice/Jaccard overlap with c3d.
# NOTE(review): the WarpImageMultiTransform/ImageMath invocations below are
# commented out, so as written this pass only re-runs the overlap
# measurements on previously transformed atlases.
if transform_pairs_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps.txt'
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                if os.path.exists(brain_dir+file+ext) and \
                   os.path.exists(brain_dir+file2+ext) and \
                   os.path.exists(xfm_dir+file+'_to_templateWarp.nii.gz'):
                    output_stem = file + '_to_' + file2
                    # Transform brains
                    args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                        source, xfm_brain_dir+output_stem+ext, '-R',target, \
                        '-i', xfm_dir+file2+'_to_templateAffine.txt', \
                        xfm_dir+file2+'_to_templateInverseWarp.nii.gz', \
                        xfm_dir+file+'_to_templateWarp.nii.gz', \
                        xfm_dir+file+'_to_templateAffine.txt'])
                    #if verbose: print(args); print(''); p = call(args, shell="True")
                    if evaluate_with_atlases:
                        # Transform atlases
                        source_labels = atlas_dir+file+ext
                        target_labels = atlas_dir+file2+ext
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels, \
                            '-i', xfm_dir+file2+'_to_templateAffine.txt', \
                            xfm_dir+file2+'_to_templateInverseWarp.nii.gz', \
                            xfm_dir+file+'_to_templateWarp.nii.gz', \
                            xfm_dir+file+'_to_templateAffine.txt','--use-NN'])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Fill target atlas mask with transformed source atlas labels
                        args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext, \
                            'PropagateLabelsThroughMask', brainmask_dir+file2+ext, \
                            xfm_atlas_dir+output_stem+ext])
                        #if verbose: print(args); print(''); p = call(args, shell="True")
                        # Measure overlap of target atlas and transformed source atlas labels
                        results_file = results_dir+output_stem+'.txt'
                        f_eval = open(results_file, 'w');
                        average_dice = 0
                        average_jacc = 0
                        print(results_file)
                        for label in labels:
                            # c3d's '-overlap' report is redirected to a
                            # scratch file and parsed below.
                            args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext, \
                                atlas_dir+file2+ext, '-overlap', str(label), \
                                '>'+results_dir+'temp_overlap.txt'])
                            p = call(args, shell="True")
                            # NOTE(review): this read handle is never closed.
                            f = open(results_dir+'temp_overlap.txt','r')
                            temp = f.read()
                            if temp != '':
                                # The last two whitespace tokens carry the
                                # Dice and Jaccard values (trailing commas
                                # stripped by the inner split).
                                dice = float(temp.split()[-2].split(',')[0])
                                jacc = float(temp.split()[-1].split(',')[0])
                            else:
                                dice = 0.0
                                jacc = 0.0
                            if isnan(dice):
                                dice = 0.0
                            if isnan(jacc):
                                jacc = 0.0
                            print_out = ' '.join(['Label:', str(label), 'Dice:', str(dice), \
                                'Jaccard:', str(jacc)])
                            print(print_out)
                            # Close/reopen in append mode so each label's
                            # line is flushed to disk immediately.
                            f_eval.close()
                            f_eval = open(results_file, 'a')
                            f_eval.write(print_out + '\n')
                            average_dice += dice
                            average_jacc += jacc
                        average_dice = average_dice/len(labels)
                        average_jacc = average_jacc/len(labels)
                        print_out1 = 'Average Dice: ' + str(average_dice)
                        print_out2 = 'Average Jacc: ' + str(average_jacc)
                        print(print_out1);
                        print(print_out2)
                        f_eval.close()
                        f_eval = open(results_file, 'a')
                        f_eval.write(print_out1 + '\n' + print_out2 + '\n')
                        f_eval.close()
                        # Same flush-by-reopen pattern for the summary file.
                        f_avg.close()
                        f_avg = open(avg_results_file, 'a');
                        f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    # Report which required input is missing.
                    # NOTE(review): the last check tests
                    # xfm_dir+file+'Warp.nii.gz', but the guard above requires
                    # xfm_dir+file+'_to_templateWarp.nii.gz' -- confirm which
                    # filename is intended.
                    if not os.path.exists(brain_dir+file+ext):
                        raise NameError('Check input file ' + brain_dir+file+ext)
                    elif not os.path.exists(brain_dir+file2+ext):
                        raise NameError('Check input file ' + brain_dir+file2+ext)
                    elif not os.path.exists(xfm_dir+file+'Warp.nii.gz'):
                        raise NameError('Check input file ' + xfm_dir+file+'Warp.nii.gz')
    if evaluate_with_atlases:
        f_avg.close()
#----------------------------------------------
# Apply landmark-driven registration transforms
# to register brains to each other via template
#----------------------------------------------
# Same pairwise composition as the intensity section above, but using the
# landmark-driven transforms computed in template space
# ("..._in_template_space_<landmark_type>").  Steps whose output file
# already exists are skipped, so the pass is resumable.
if transform_landmarks_via_template:
    if evaluate_with_atlases:
        avg_results_file = results_dir+'dice_jacc_overlaps_'+landmark_type+'.txt'
        f_avg = open(avg_results_file, 'w');
    for file in source_files:
        source = brain_dir+file+ext
        source_landmarks = landmarks_dir+file+ext
        for file2 in target_files:
            if file2 != file:
                target = brain_dir+file2+ext
                target_landmarks = landmarks_dir+file2+ext
                if os.path.exists(source) and \
                   os.path.exists(target) and \
                   os.path.exists(source_landmarks) and \
                   os.path.exists(target_landmarks):
                    pair = file+'_to_'+file2
                    inv_pair = file2+'_to_'+file
                    output_stem = pair+'_'+landmark_type
                    xfm_stem = xfm_dir+pair+'_in_template_space_'+landmark_type
                    inv_xfm_stem = xfm_dir+inv_pair+'_in_template_space_'+landmark_type
                    # Transform brains
                    if not os.path.exists(xfm_brain_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source, xfm_brain_dir+output_stem+ext, '-R', target, \
                            '-i', inv_xfm_stem+'Affine.txt', \
                            inv_xfm_stem+'InverseWarp.nii.gz', \
                            xfm_stem+'Warp.nii.gz', \
                            xfm_stem+'Affine.txt'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    # Transform landmarks
                    if not os.path.exists(xfm_landmarks_dir+output_stem+ext):
                        args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                            source_landmarks, xfm_landmarks_dir+output_stem+ext, '-R',target_landmarks, \
                            '-i', inv_xfm_stem+'Affine.txt', \
                            inv_xfm_stem+'InverseWarp.nii.gz', \
                            xfm_stem+'Warp.nii.gz', \
                            xfm_stem+'Affine.txt','--use-NN'])
                        if verbose: print(args); print(''); p = call(args, shell="True")
                    if evaluate_with_atlases:
                        # Skip pairs whose transformed atlas or per-pair
                        # results file already exists (resume support).
                        if not os.path.exists(xfm_atlas_dir+output_stem+ext):
                            if not os.path.exists(results_dir+output_stem+'.txt'):
                                # Transform atlases
                                source_labels = atlas_dir+file+ext
                                target_labels = atlas_dir+file2+ext
                                args = " ".join([ANTSPATH+'WarpImageMultiTransform', str(dim), \
                                    source_labels, xfm_atlas_dir+output_stem+ext, '-R', target_labels, \
                                    '-i', inv_xfm_stem+'Affine.txt', \
                                    inv_xfm_stem+'InverseWarp.nii.gz', \
                                    xfm_stem+'Warp.nii.gz', \
                                    xfm_stem+'Affine.txt','--use-NN'])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Fill target atlas mask with transformed source atlas labels
                                args = " ".join(['ImageMath', str(dim), xfm_atlas_dir+output_stem+'_filled'+ext, \
                                    'PropagateLabelsThroughMask', brainmask_dir+file2+ext, \
                                    xfm_atlas_dir+output_stem+ext])
                                if verbose: print(args); print(''); p = call(args, shell="True")
                                # Measure overlap of target atlas and transformed source atlas labels
                                results_file = results_dir+output_stem+'.txt'
                                f_eval = open(results_file, 'w');
                                average_dice = 0
                                average_jacc = 0
                                for label in labels:
                                    args = " ".join(['c3d', xfm_atlas_dir+output_stem+'_filled'+ext, \
                                        atlas_dir+file2+ext, '-overlap', str(label), \
                                        '>'+results_dir+'temp_overlap.txt'])
                                    p = call(args, shell="True")
                                    # NOTE(review): this read handle is never closed.
                                    f = open(results_dir+'temp_overlap.txt','r')
                                    temp = f.read()
                                    dice = 0
                                    jacc = 0
                                    if temp != '':
                                        dice = float(temp.split()[-2].split(',')[0])
                                        jacc = float(temp.split()[-1].split(',')[0])
                                    print_out = " ".join(['Label:', str(label), 'Dice:', str(dice), \
                                        'Jaccard:', str(jacc)])
                                    print(print_out)
                                    # Close/reopen in append mode so each
                                    # line is flushed to disk immediately.
                                    f_eval.close()
                                    f_eval = open(results_file, 'a')
                                    f_eval.write(print_out + '\n')
                                    # NOTE(review): unlike the intensity
                                    # section, NaNs are zeroed only after the
                                    # per-label line is written.
                                    if isnan(dice):
                                        dice = 0
                                    if isnan(jacc):
                                        jacc = 0
                                    average_dice += dice
                                    average_jacc += jacc
                                average_dice = average_dice/len(labels)
                                average_jacc = average_jacc/len(labels)
                                print_out1 = 'Average Dice: ' + str(average_dice)
                                print_out2 = 'Average Jacc: ' + str(average_jacc)
                                print(print_out1);
                                print(print_out2)
                                f_eval.close()
                                f_eval = open(results_file, 'a')
                                f_eval.write('\n' + print_out1 + '\n' + print_out2 + '\n\n')
                                f_eval.close()
                                f_avg.close()
                                f_avg = open(avg_results_file, 'a');
                                f_avg.write(output_stem + ' ' + str(average_dice) + ' ' + str(average_jacc) + '\n')
                else:
                    # Report which required landmark file is missing.
                    # NOTE(review): the guard above also checks the two brain
                    # images, but only the landmark files are reported here.
                    if not os.path.exists(source_landmarks):
                        raise NameError('Check input file ' + source_landmarks)
                    elif not os.path.exists(target_landmarks):
                        raise NameError('Check input file ' + target_landmarks)
    if evaluate_with_atlases:
        f_avg.close()
| 0 | 0 | 0 |
74727a6c79fe3a6e89dbf9ba47e01f093de21c57 | 574 | py | Python | EfficientCoding/Assignment-1-equilibriumindex.py | vikbehal/Explore | b35948d8a6894647df3ee462746475f7e66f78f8 | [
"MIT"
] | 3 | 2019-01-29T06:33:34.000Z | 2022-01-26T20:01:04.000Z | EfficientCoding/Assignment-1-equilibriumindex.py | vikbehal/Explore | b35948d8a6894647df3ee462746475f7e66f78f8 | [
"MIT"
] | null | null | null | EfficientCoding/Assignment-1-equilibriumindex.py | vikbehal/Explore | b35948d8a6894647df3ee462746475f7e66f78f8 | [
"MIT"
] | 1 | 2022-03-11T10:47:29.000Z | 2022-03-11T10:47:29.000Z |
#inputString = input()
inputString = "3,-4, 2, -1,-3, 2, 1"
inputList = [int(val) for val in inputString.split(",")]
print(solve(inputList)) | 22.076923 | 57 | 0.557491 | def solve(inputList):
    """Return an equilibrium index of inputList, or -1 if none exists.

    An equilibrium index i is one where the sum of the elements before i
    equals the sum of the elements after i.
    """
    # Edge cases
    # NOTE(review): because the last index is tested before the scan below,
    # the returned index is not necessarily the smallest equilibrium index.
    if sum(inputList[1:]) == 0:
        return 0
    totalItems = len(inputList)
    if sum(inputList[:totalItems - 1]) == 0:
        return totalItems - 1
    # Single pass: maintain running sums on either side of the current index.
    leftSum = 0
    rightSum = sum(inputList)
    for idx, item in enumerate(inputList):
        rightSum -= item  # rightSum is now the sum strictly after idx
        if leftSum == rightSum:
            return idx
        leftSum += item
    return -1
# Demo driver: parse a comma-separated string of integers and print an
# equilibrium index (-1 if none).
#inputString = input()
inputString = "3,-4, 2, -1,-3, 2, 1"
inputList = [int(val) for val in inputString.split(",")]
print(solve(inputList)) | 404 | 0 | 23
90e2e88ba073a5e73d350beeba17dadc19ce1da1 | 390 | py | Python | rplugin/python3/defx/session.py | roachsinai/defx.nvim | 609e858b5211ec4de45cb93045ab320c961048b2 | [
"MIT"
] | 1,229 | 2017-01-03T13:21:59.000Z | 2022-03-29T06:33:20.000Z | rplugin/python3/defx/session.py | roachsinai/defx.nvim | 609e858b5211ec4de45cb93045ab320c961048b2 | [
"MIT"
] | 295 | 2017-12-07T02:38:55.000Z | 2022-03-29T00:30:52.000Z | rplugin/python3/defx/session.py | roachsinai/defx.nvim | 609e858b5211ec4de45cb93045ab320c961048b2 | [
"MIT"
] | 136 | 2018-07-20T02:57:54.000Z | 2022-03-01T00:30:36.000Z | # ============================================================================
# FILE: session.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import typing
| 27.857143 | 78 | 0.402564 | # ============================================================================
# FILE: session.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import typing
class Session(typing.NamedTuple):
    """Immutable record holding the saved state of one defx session."""

    # Session name (empty by default).
    name: str = ''
    # Path associated with the session.
    path: str = ''
    # Candidates recorded as opened for this session.
    # NOTE(review): a mutable list used as a NamedTuple default is shared by
    # every instance that does not override it -- confirm callers never
    # mutate it in place.
    opened_candidates: typing.List[str] = []
| 0 | 95 | 23 |
6ee29bfe43fc37be3cb3d8d63953bf79d532f4aa | 1,973 | py | Python | vae.py | chauhankaranraj/ELDR-pytorch | 99d40186e960933e6f0ae18270530f3534741296 | [
"MIT"
] | null | null | null | vae.py | chauhankaranraj/ELDR-pytorch | 99d40186e960933e6f0ae18270530f3534741296 | [
"MIT"
] | null | null | null | vae.py | chauhankaranraj/ELDR-pytorch | 99d40186e960933e6f0ae18270530f3534741296 | [
"MIT"
] | null | null | null | # NOTE: this code is currently copypasta'd from pytorch official examples repo at
# https://github.com/pytorch/examples/blob/master/vae/main.py
# In the future, this could probably be added as a submodule
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
# Reconstruction + KL divergence losses summed over all elements and batch
| 34.017241 | 81 | 0.660416 | # NOTE: this code is currently copypasta'd from pytorch official examples repo at
# https://github.com/pytorch/examples/blob/master/vae/main.py
# In the future, this could probably be added as a submodule
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
class VAE(nn.Module):
    """Variational autoencoder with a single 64-unit hidden layer per side.

    Encoder: input_ndim -> 64 -> (mu, logvar), each of size output_ndim.
    Decoder: output_ndim -> 64 -> sigmoid over input_ndim units.
    """

    def __init__(self, input_ndim, output_ndim):
        super(VAE, self).__init__()
        # Layer attribute names (fc1, fc21, fc22, fc3, fc4) are part of the
        # state_dict and are kept unchanged for checkpoint compatibility.
        self.fc1 = nn.Linear(input_ndim, 64)
        self.fc21 = nn.Linear(64, output_ndim)  # latent mean head
        self.fc22 = nn.Linear(64, output_ndim)  # latent log-variance head
        self.fc3 = nn.Linear(output_ndim, 64)
        self.fc4 = nn.Linear(64, input_ndim)

    def encode(self, x):
        """Map x to the parameters (mu, logvar) of the latent Gaussian."""
        hidden = F.relu(self.fc1(x))
        return self.fc21(hidden), self.fc22(hidden)

    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps, eps ~ N(0, I) (reparameterization trick)."""
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return mu + noise * sigma

    def decode(self, z):
        """Map a latent sample z back to input space, squashed into (0, 1)."""
        hidden = F.relu(self.fc3(z))
        return torch.sigmoid(self.fc4(hidden))

    def forward(self, x):
        """Return (reconstruction, mu, logvar) for a batch x."""
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
    """VAE training loss for a batch, summed over elements and batch.

    The canonical objective is binary cross-entropy reconstruction plus the
    KL divergence of q(z|x) from N(0, I).  Both terms are still computed
    below, but the value actually returned is a summed smooth-L1
    reconstruction loss (see the FIXME).
    """
    bce = F.binary_cross_entropy(recon_x, x, reduction='sum')
    # Closed-form KL(q(z|x) || N(0, I)); see Appendix B of Kingma & Welling,
    # "Auto-Encoding Variational Bayes" (ICLR 2014),
    # https://arxiv.org/abs/1312.6114:
    #   -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    # FIXME (retained from the original author): BCE + KLD did not converge
    # to a meaningful representation on the iris data, so only the smooth-L1
    # reconstruction term is returned.  bce and kld above are computed but
    # unused; omitting KLD entirely is acknowledged as questionable.
    return F.smooth_l1_loss(recon_x, x, reduction='sum')
| 1,432 | 0 | 180 |
5e2f291dcf29e2e8f32139edb721bac8de6f799d | 664 | py | Python | test/unit/devices/test_iosxr.py | NorthLandTeam/ncclient | ff6bba74c3304f0a5053087449f5a51e8eb13ed4 | [
"Apache-2.0"
] | 498 | 2015-10-21T18:43:23.000Z | 2022-03-29T17:27:59.000Z | test/unit/devices/test_iosxr.py | NorthLandTeam/ncclient | ff6bba74c3304f0a5053087449f5a51e8eb13ed4 | [
"Apache-2.0"
] | 440 | 2015-09-07T23:43:01.000Z | 2022-03-17T11:43:16.000Z | test/unit/devices/test_iosxr.py | NorthLandTeam/ncclient | ff6bba74c3304f0a5053087449f5a51e8eb13ed4 | [
"Apache-2.0"
] | 330 | 2015-09-10T16:53:50.000Z | 2022-03-31T12:24:53.000Z | import unittest
from ncclient.devices.iosxr import *
| 30.181818 | 69 | 0.712349 | import unittest
from ncclient.devices.iosxr import *
class TestIosxrDevice(unittest.TestCase):
    """Unit tests for the IOS-XR device handler (ncclient.devices.iosxr)."""

    def setUp(self):
        # NOTE(review): the device-info dict says 'iosxe' even though this is
        # the IOS-XR handler -- looks like a copy/paste slip; confirm intent.
        self.obj = IosxrDeviceHandler({'name': 'iosxe'})

    def test_add_additional_ssh_connect_params(self):
        """The handler injects exactly its unknown-host callback into the params dict."""
        expected = dict()
        expected["unknown_host_cb"] = iosxr_unknown_host_cb
        actual = dict()
        self.obj.add_additional_ssh_connect_params(actual)
        self.assertDictEqual(expected, actual)

    def test_perform_qualify_check(self):
        """perform_qualify_check() returns False for this handler."""
        self.assertFalse(self.obj.perform_qualify_check())

    def test_csr_unknown_host_cb(self):
        # NOTE(review): 'csr' in the test name does not match iosxr -- likely
        # copied from another device's test module.
        """The unknown-host callback returns True for an arbitrary host/fingerprint."""
        self.assertTrue(iosxr_unknown_host_cb('host', 'fingerprint'))
| 455 | 20 | 135 |
35744b0cc75c7d6a7b087efb811ee192fa489500 | 318 | py | Python | onfido/apps.py | snicks1/django-onfido | 7288552c6a156d022539d4d22d7f5a0236018ada | [
"MIT"
] | null | null | null | onfido/apps.py | snicks1/django-onfido | 7288552c6a156d022539d4d22d7f5a0236018ada | [
"MIT"
] | null | null | null | onfido/apps.py | snicks1/django-onfido | 7288552c6a156d022539d4d22d7f5a0236018ada | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class OnfidoAppConfig(AppConfig):
    """AppConfig for Django-Onfido."""

    name = 'onfido'
    verbose_name = "Onfido"
    # NOTE(review): class-level mutable list, shared by all references to
    # this config class -- confirm it is intentionally global state.
    configs = []

    def ready(self):
        """Validate config and connect signals."""
        # NOTE(review): the body only delegates to AppConfig.ready(); the
        # validation/signal wiring promised by the docstring is not here.
        super(OnfidoAppConfig, self).ready()
| 19.875 | 50 | 0.625786 | # -*- coding: utf-8 -*-
from django.apps import AppConfig
class OnfidoAppConfig(AppConfig):
    """AppConfig for Django-Onfido."""

    name = 'onfido'
    verbose_name = "Onfido"
    # NOTE(review): class-level mutable list, shared by all references to
    # this config class -- confirm it is intentionally global state.
    configs = []

    def ready(self):
        """Validate config and connect signals."""
        # NOTE(review): the body only delegates to AppConfig.ready(); the
        # validation/signal wiring promised by the docstring is not here.
        super(OnfidoAppConfig, self).ready()
| 0 | 0 | 0 |
6f54454045f345a91f5cdf3456d3e88496e7aada | 8,242 | py | Python | bob/bio/face/database/pola_thermal.py | bioidiap/bob.bio.face | 2341e6423ca5a412ebe23fa18acacd69ea1ef914 | [
"BSD-3-Clause"
] | 4 | 2016-09-01T13:16:46.000Z | 2021-09-03T03:27:18.000Z | bob/bio/face/database/pola_thermal.py | bioidiap/bob.bio.face | 2341e6423ca5a412ebe23fa18acacd69ea1ef914 | [
"BSD-3-Clause"
] | 6 | 2015-09-02T19:31:15.000Z | 2016-10-10T21:48:39.000Z | bob/bio/face/database/pola_thermal.py | bioidiap/bob.bio.face | 2341e6423ca5a412ebe23fa18acacd69ea1ef914 | [
"BSD-3-Clause"
] | 6 | 2015-10-07T17:18:48.000Z | 2017-07-18T19:41:14.000Z | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
"""
PolaThermal database: database implementation
"""
from bob.bio.base.database import CSVDataset
from bob.bio.base.database import CSVToSampleLoaderBiometrics
from bob.bio.face.database.sample_loaders import EyesAnnotations
from bob.extension import rc
from bob.extension.download import get_file
import bob.io.base
from sklearn.pipeline import make_pipeline
class PolaThermalDatabase(CSVDataset):
"""
Collected by USA Army, the Polarimetric Thermal Database contains basically VIS and Thermal face images.
Follow bellow the description of the imager used to capture this device.
The **polarimetric** LWIR imager used to collect this database was developed by Polaris Sensor Technologies.
The imager is based on the division-of-time spinning achromatic retarder (SAR) design that uses a spinning phase-retarder mounted in series with a linear wire-grid polarizer.
This system, also referred to as a polarimeter, has a spectral response range of 7.5-11.1, using a Stirling-cooled mercury telluride focal plane array with pixel array dimensions of 640×480.
A Fourier modulation technique is applied to the pixel readout, followed by a series expansion and inversion to compute the Stokes images.
Data were recorded at 60 frames per second (fps) for this database, using a wide FOV of 10.6°×7.9°. Prior to collecting data for each subject, a two-point non-uniformity correction (NUC) was performed using a Mikron blackbody at 20°C and 40°C, which covers the range of typical facial temperatures (30°C-35°C).
Data was recorded on a laptop using custom vendor software.
An array of four Basler Scout series cameras was used to collect the corresponding **visible spectrum imagery**.
Two of the cameras are monochrome (model # scA640-70gm), with pixel array dimensions of 659×494.
The other two cameras are color (model # scA640-70gc), with pixel array dimensions of 658×494.
The dataset contains 60 subjects in total.
For **VIS** images (considered only the 87 pixels interpupil distance) there are 4 samples per subject with neutral expression (called baseline condition **B**) and 12 samples per subject varying the facial expression (called expression **E**).
Such variability was introduced by asking the subject to count orally.
In total there are 960 images for this modality.
For the **thermal** images there are 4 types of thermal imagery based on the Stokes parameters (:math:`S_0`, :math:`S_1`, :math:`S_2` and :math:`S_3`) commonly used to represent the polarization state.
The thermal imagery is the following:
- :math:`S_0`: The conventional thermal image
- :math:`S_1`
- :math:`S_2`
- DoLP: The degree-of-linear-polarization (DoLP) describes the portion of an electromagnetic wave that is linearly polarized, as defined :math:`\\frac{sqrt(S_{1}^{2} + S_{2}^{2})}{S_0}`.
Since :math:`S_3` is very small and usually taken to be zero, the authors of the database decided not to provide this part of the data.
The same facial expression variability introduced in **VIS** is introduced for **Thermal** images.
The distance between the subject and the camera is the last source of variability introduced in the thermal images.
There are 3 ranges: R1 (2.5m), R2 (5m) and R3 (7.5m).
In total there are 11,520 images for this modality and for each subject they are split as the following:
+----------------+----------+----------+----------+
| Imagery/Range | R1 (B/E) | R2 (B/E) | R3 (B/E) |
+================+==========+==========+==========+
| :math:`S_0` | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
| :math:`S_1` | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
| :math:`S_2` | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
| DoLP | 16 (8/8) | 16 (8/8) | 16 (8/8) |
+----------------+----------+----------+----------+
.. warning::
Use the command below to set the path of the real data::
$ bob config set bob.db.pola-thermal.directory [PATH-TO-MEDS-DATA]
Parameters
----------
protocol: str
One of the database protocols.
"""
@staticmethod
@staticmethod
| 45.038251 | 314 | 0.609803 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
"""
PolaThermal database: database implementation
"""
from bob.bio.base.database import CSVDataset
from bob.bio.base.database import CSVToSampleLoaderBiometrics
from bob.bio.face.database.sample_loaders import EyesAnnotations
from bob.extension import rc
from bob.extension.download import get_file
import bob.io.base
from sklearn.pipeline import make_pipeline
class PolaThermalDatabase(CSVDataset):
    """
    Collected by USA Army, the Polarimetric Thermal Database contains basically VIS and Thermal face images.

    Follow bellow the description of the imager used to capture this device.

    The **polarimetric** LWIR imager used to collect this database was developed by Polaris Sensor Technologies.
    The imager is based on the division-of-time spinning achromatic retarder (SAR) design that uses a spinning phase-retarder mounted in series with a linear wire-grid polarizer.
    This system, also referred to as a polarimeter, has a spectral response range of 7.5-11.1, using a Stirling-cooled mercury telluride focal plane array with pixel array dimensions of 640×480.
    A Fourier modulation technique is applied to the pixel readout, followed by a series expansion and inversion to compute the Stokes images.
    Data were recorded at 60 frames per second (fps) for this database, using a wide FOV of 10.6°×7.9°. Prior to collecting data for each subject, a two-point non-uniformity correction (NUC) was performed using a Mikron blackbody at 20°C and 40°C, which covers the range of typical facial temperatures (30°C-35°C).
    Data was recorded on a laptop using custom vendor software.

    An array of four Basler Scout series cameras was used to collect the corresponding **visible spectrum imagery**.
    Two of the cameras are monochrome (model # scA640-70gm), with pixel array dimensions of 659×494.
    The other two cameras are color (model # scA640-70gc), with pixel array dimensions of 658×494.

    The dataset contains 60 subjects in total.
    For **VIS** images (considered only the 87 pixels interpupil distance) there are 4 samples per subject with neutral expression (called baseline condition **B**) and 12 samples per subject varying the facial expression (called expression **E**).
    Such variability was introduced by asking the subject to count orally.
    In total there are 960 images for this modality.
    For the **thermal** images there are 4 types of thermal imagery based on the Stokes parameters (:math:`S_0`, :math:`S_1`, :math:`S_2` and :math:`S_3`) commonly used to represent the polarization state.
    The thermal imagery is the following:

    - :math:`S_0`: The conventional thermal image
    - :math:`S_1`
    - :math:`S_2`
    - DoLP: The degree-of-linear-polarization (DoLP) describes the portion of an electromagnetic wave that is linearly polarized, as defined :math:`\\frac{sqrt(S_{1}^{2} + S_{2}^{2})}{S_0}`.

    Since :math:`S_3` is very small and usually taken to be zero, the authors of the database decided not to provide this part of the data.
    The same facial expression variability introduced in **VIS** is introduced for **Thermal** images.
    The distance between the subject and the camera is the last source of variability introduced in the thermal images.
    There are 3 ranges: R1 (2.5m), R2 (5m) and R3 (7.5m).
    In total there are 11,520 images for this modality and for each subject they are split as the following:

    +----------------+----------+----------+----------+
    | Imagery/Range  | R1 (B/E) | R2 (B/E) | R3 (B/E) |
    +================+==========+==========+==========+
    | :math:`S_0`    | 16 (8/8) | 16 (8/8) | 16 (8/8) |
    +----------------+----------+----------+----------+
    | :math:`S_1`    | 16 (8/8) | 16 (8/8) | 16 (8/8) |
    +----------------+----------+----------+----------+
    | :math:`S_2`    | 16 (8/8) | 16 (8/8) | 16 (8/8) |
    +----------------+----------+----------+----------+
    | DoLP           | 16 (8/8) | 16 (8/8) | 16 (8/8) |
    +----------------+----------+----------+----------+

    .. warning::
        Use the command below to set the path of the real data::

            $ bob config set bob.db.pola-thermal.directory [PATH-TO-MEDS-DATA]

    Parameters
    ----------

    protocol: str
        One of the database protocols.
    """

    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
        # Fetch (and cache) the archive with the CSV protocol definitions.
        protocol_archive = get_file(
            "pola_thermal.tar.gz",
            PolaThermalDatabase.urls(),
            file_hash="cfbd7362773c6d49292fe1998e3c3825",
        )
        # Root directory of the raw data, configured via `bob config`.
        data_directory = rc.get("bob.db.pola-thermal.directory", "")

        def load_png16(path):
            # Images in this dataset are stored as 16-bit PNG [0-65535] and
            # bob.bio.face assumes images are between 0 and 255, so we divide
            # by 257: 65535 / 255 = 257.
            return bob.io.base.load(path) / 257

        sample_loader = make_pipeline(
            CSVToSampleLoaderBiometrics(
                data_loader=load_png16,
                dataset_original_directory=data_directory,
                extension=".png",
            ),
            EyesAnnotations(),
        )
        super().__init__(
            name="polathermal",
            protocol=protocol,
            dataset_protocol_path=protocol_archive,
            csv_to_sample_loader=sample_loader,
            annotation_type=annotation_type,
            fixed_positions=fixed_positions,
        )

    @staticmethod
    def protocols():
        # TODO: Until we have (if we have) a function that dumps the protocols,
        # generate the fixed grid of names: 5 VIS-VIS splits, then, for every
        # condition (overall/expression/R1/R2/R3), thermal and polarimetric
        # variants with 5 splits each.
        splits = range(1, 6)
        names = [f"VIS-VIS-split{s}" for s in splits]
        for condition in ("overall", "expression", "R1", "R2", "R3"):
            for modality in ("thermal", "polarimetric"):
                names.extend(f"VIS-{modality}-{condition}-split{s}" for s in splits)
        return names

    @staticmethod
    def urls():
        # Same archive mirrored over https and plain http.
        return [
            "https://www.idiap.ch/software/bob/databases/latest/pola_thermal.tar.gz",
            "http://www.idiap.ch/software/bob/databases/latest/pola_thermal.tar.gz",
        ]
| 3,725 | 0 | 79 |
779f4c31a1264d2a29f034d8c08cac74be966211 | 3,725 | py | Python | unittest_reinvent/scoring_tests/physchem/test_slogp_score.py | fujirock/Reinvent | 9c57636f9d32b4ce5b75670f43906a70d5daf886 | [
"MIT"
] | 1 | 2021-08-31T02:28:10.000Z | 2021-08-31T02:28:10.000Z | unittest_reinvent/scoring_tests/physchem/test_slogp_score.py | prasannavd/Reinvent | ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616 | [
"MIT"
] | null | null | null | unittest_reinvent/scoring_tests/physchem/test_slogp_score.py | prasannavd/Reinvent | ca02ebee8d8ed83223c55f4a1dd1b3fbc2359616 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import numpy.testing as npt
from scoring.component_parameters import ComponentParameters
from scoring.function import CustomSum
from utils.enums.component_specific_parameters_enum import ComponentSpecificParametersEnum
from utils.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
from utils.enums.transformation_type_enum import TransformationTypeEnum
| 47.75641 | 127 | 0.517584 | import unittest
import numpy as np
import numpy.testing as npt
from scoring.component_parameters import ComponentParameters
from scoring.function import CustomSum
from utils.enums.component_specific_parameters_enum import ComponentSpecificParametersEnum
from utils.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
from utils.enums.transformation_type_enum import TransformationTypeEnum
class Test_slogp_score_no_transformation(unittest.TestCase):
    """SlogP component with the transformation switched off: raw SlogP
    values must be returned unchanged by the scoring function."""

    @classmethod
    def setUpClass(cls):
        component_enum = ScoringFunctionComponentNameEnum()
        parameter_enum = ComponentSpecificParametersEnum()
        slogp_parameters = ComponentParameters(
            component_type=component_enum.SLOGP,
            name="SlogP",
            weight=1.,
            smiles=[],
            model_path="",
            specific_parameters={parameter_enum.TRANSFORMATION: False},
        )
        # Scoring function under test; built once for the whole class.
        cls.sf_state = CustomSum(parameters=[slogp_parameters])

    def test_slogp_1(self):
        query_smiles = [
            "OC(=O)P(=O)(O)O",
            "Cc1ccccc1N1C(=O)c2cc(S(N)(=O)=O)c(Cl)cc2NC1C",
            "N12CC3C(=NC4C(C=3)=CC=CC=4)C1=CC1=C(COC(=O)C1(O)CC)C2=O",
            "N12CC3C(=NC4C(C=3C(=O)O)=CC3=C(OCCO3)C=4)C1=CC1=C(COC(=O)C1(O)CC)C2=O",
            "FC1C=CC(CC(=NS(=O)(=O)C2C=CC(C)=CC=2)N2CCN(CC3C4C(=CC=CC=4)N=C4C=3CN3C4=CC4=C(COC(=O)C4(O)CC)C3=O)CC2)=CC=1"
        ]
        expected = np.array([-0.1579, 2.71412, 2.0796, 1.549, 4.67482])
        summary = self.sf_state.get_final_score(smiles=query_smiles)
        # Raw SlogP values, compared to 2 decimal places.
        npt.assert_array_almost_equal(summary.total_score, expected, 2)
class Test_slogp_score_with_double_sigmoid(unittest.TestCase):
    """SlogP component with a double-sigmoid transformation that maps raw
    SlogP values into the [0, 1] range."""

    @classmethod
    def setUpClass(cls):
        component_enum = ScoringFunctionComponentNameEnum()
        parameter_enum = ComponentSpecificParametersEnum()
        transformation_enum = TransformationTypeEnum()
        # Double sigmoid centered on the SlogP window [LOW, HIGH] = [1, 3].
        transformation_parameters = {
            parameter_enum.TRANSFORMATION: True,
            parameter_enum.LOW: 1,
            parameter_enum.HIGH: 3,
            parameter_enum.COEF_DIV: 3,
            parameter_enum.COEF_SI: 10,
            parameter_enum.COEF_SE: 10,
            parameter_enum.TRANSFORMATION_TYPE: transformation_enum.DOUBLE_SIGMOID,
        }
        slogp_parameters = ComponentParameters(
            component_type=component_enum.SLOGP,
            name="SlogP",
            weight=1.,
            smiles=[],
            model_path="",
            specific_parameters=transformation_parameters,
        )
        # Scoring function under test; built once for the whole class.
        cls.sf_state = CustomSum(parameters=[slogp_parameters])

    def test_slogp_1(self):
        query_smiles = [
            "OC(=O)P(=O)(O)O",
            "Cc1ccccc1N1C(=O)c2cc(S(N)(=O)=O)c(Cl)cc2NC1C",
            "N12CC3C(=NC4C(C=3)=CC=CC=4)C1=CC1=C(COC(=O)C1(O)CC)C2=O",
            "N12CC3C(=NC4C(C=3C(=O)O)=CC3=C(OCCO3)C=4)C1=CC1=C(COC(=O)C1(O)CC)C2=O",
            "FC1C=CC(CC(=NS(=O)(=O)C2C=CC(C)=CC=2)N2CCN(CC3C4C(=CC=CC=4)N=C4C=3CN3C4=CC4=C(COC(=O)C4(O)CC)C3=O)CC2)=CC=1"
        ]
        expected = np.array([0.0, 0.9, 1.0, 1.0, 0.0])
        summary = self.sf_state.get_final_score(smiles=query_smiles)
        # Transformed scores lie in [0, 1]; compared to 2 decimal places.
        npt.assert_array_almost_equal(summary.total_score, expected, 2)
| 3,038 | 223 | 46 |
9afdc4c833bc248918013ec47848b1b410c62331 | 5,661 | py | Python | Repository_files/TLCS/models/create_plots.py | kraken24/mhp_hackathon_sustainable_traffic_light_challenge | 0b4c61968d54c7e8faceb07a2c78c70570a2e162 | [
"CC0-1.0"
] | null | null | null | Repository_files/TLCS/models/create_plots.py | kraken24/mhp_hackathon_sustainable_traffic_light_challenge | 0b4c61968d54c7e8faceb07a2c78c70570a2e162 | [
"CC0-1.0"
] | null | null | null | Repository_files/TLCS/models/create_plots.py | kraken24/mhp_hackathon_sustainable_traffic_light_challenge | 0b4c61968d54c7e8faceb07a2c78c70570a2e162 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 02:12:12 2022
@author: Kraken
Project: MHP Hackathon
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
WORKING_DIR = "model_14"
WORKING_DIR2 = "model_12"
# "model_8": dqn with fixed weights
# "model_4": dqn
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots - Combined
# =============================================================================
QUEUE = "plot_queue_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.plot(data, "orange", label="RL Agent")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Delay Plots - Combined
# =============================================================================
QUEUE = "plot_delay_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Reward Plots - Combined
# =============================================================================
QUEUE = "plot_reward_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Negative Reward")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="best")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
WORKING_DIR = "model_14"
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots
# =============================================================================
QUEUE = "plot_queue_data.txt"
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title(f"Decrease: {first_value:.2f} -> {last_value:.2f} = {perc_decrease:.2f}%")
plt.savefig(os.path.join(WORKING_DIR, QUEUE.replace("_data.txt", "_new.png")))
# =============================================================================
# Delay Plots
# =============================================================================
DELAY = "plot_delay_data.txt"
with open(os.path.join(WORKING_DIR, DELAY), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s) / 1000 vehicles")
plt.title(f"Decrease: {first_value:.2f} -> {last_value:.2f} = {perc_decrease:.2f}%")
plt.savefig(os.path.join(WORKING_DIR, DELAY.replace("_data.txt", "_new.png")))
# =============================================================================
# Reward Plots
# =============================================================================
REWARD = "plot_reward_data.txt"
with open(os.path.join(WORKING_DIR, REWARD), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative negative reward")
plt.title("Reward Maximization by RL Agent")
plt.savefig(os.path.join(WORKING_DIR, REWARD.replace("_data.txt", "_new.png")))
| 35.161491 | 84 | 0.585409 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 5 02:12:12 2022
@author: Kraken
Project: MHP Hackathon
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
WORKING_DIR = "model_14"
WORKING_DIR2 = "model_12"
# "model_8": dqn with fixed weights
# "model_4": dqn
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots - Combined
# =============================================================================
QUEUE = "plot_queue_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.plot(data, "orange", label="RL Agent")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Delay Plots - Combined
# =============================================================================
QUEUE = "plot_delay_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s)")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="upper right")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
# =============================================================================
# Reward Plots - Combined
# =============================================================================
QUEUE = "plot_reward_data.txt"
# rl agent
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data][:250]
# ctl
with open(os.path.join(WORKING_DIR2, QUEUE), "r") as txtfile:
data2 = txtfile.readlines()
data2 = [float(x.rstrip("\n")) for x in data2]
fig = plt.figure(figsize=(12, 8))
plt.plot(data, "orange", label="RL Agent")
plt.plot(data2, "blue", label="Conventional Traffic Lights")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Negative Reward")
plt.title("Conventional Traffic Lights & RL Optimized Smart Traffic Lights")
plt.grid()
plt.legend(loc="best")
plt.savefig(QUEUE.replace("_data.txt", "_combined.png"))
WORKING_DIR = "model_14"
MVG_AVG_WINDOW = 5
# =============================================================================
# Queue Plots
# =============================================================================
QUEUE = "plot_queue_data.txt"
with open(os.path.join(WORKING_DIR, QUEUE), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Average queue length (vehicles)")
plt.title(f"Decrease: {first_value:.2f} -> {last_value:.2f} = {perc_decrease:.2f}%")
plt.savefig(os.path.join(WORKING_DIR, QUEUE.replace("_data.txt", "_new.png")))
# =============================================================================
# Delay Plots
# =============================================================================
DELAY = "plot_delay_data.txt"
with open(os.path.join(WORKING_DIR, DELAY), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative Delay (s) / 1000 vehicles")
plt.title(f"Decrease: {first_value:.2f} -> {last_value:.2f} = {perc_decrease:.2f}%")
plt.savefig(os.path.join(WORKING_DIR, DELAY.replace("_data.txt", "_new.png")))
# =============================================================================
# Reward Plots
# =============================================================================
REWARD = "plot_reward_data.txt"
with open(os.path.join(WORKING_DIR, REWARD), "r") as txtfile:
data = txtfile.readlines()
data = [float(x.rstrip("\n")) for x in data]
data_series = pd.Series(data).rolling(MVG_AVG_WINDOW).mean().tolist()
first_value = data_series[MVG_AVG_WINDOW - 1]
last_value = data_series[-1]
perc_decrease = (first_value - last_value) / first_value * 100
fig = plt.figure(figsize=(12, 8))
plt.plot(data)
plt.plot(data_series, "r")
plt.xlabel("# Episodes")
plt.ylabel("Cumulative negative reward")
plt.title("Reward Maximization by RL Agent")
plt.savefig(os.path.join(WORKING_DIR, REWARD.replace("_data.txt", "_new.png")))
| 0 | 0 | 0 |
ab4dcf42708ad98c9f00d476dd47908b95919fce | 22,998 | py | Python | src/deepke/relation_extraction/multimodal/models/clip/feature_extraction_utils.py | johncolezhang/DeepKE | ea4552ec42cb003a835f00fc14fb454f9a9a7183 | [
"MIT"
] | 3 | 2022-02-18T05:03:02.000Z | 2022-03-19T12:32:16.000Z | src/deepke/relation_extraction/multimodal/models/clip/feature_extraction_utils.py | johncolezhang/DeepKE | ea4552ec42cb003a835f00fc14fb454f9a9a7183 | [
"MIT"
] | null | null | null | src/deepke/relation_extraction/multimodal/models/clip/feature_extraction_utils.py | johncolezhang/DeepKE | ea4552ec42cb003a835f00fc14fb454f9a9a7183 | [
"MIT"
] | null | null | null | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extraction saving/loading class for common feature extractors.
"""
import copy
import json
import os
from collections import UserDict
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import numpy as np
from transformers.file_utils import (
cached_path,
hf_bucket_url,
is_flax_available,
is_remote_url,
is_tf_available,
is_torch_available,
torch_required,
)
from transformers.utils import logging
from .file_utils import *
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
_is_offline_mode = True if os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES else False
from enum import Enum
class ExplicitEnum(Enum):
"""
Enum with more explicit error message for missing values.
"""
@classmethod
class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
    tab-completion in an IDE.
    """

    # Framework identifiers; the values are the short codes accepted by
    # the `return_tensors`/`tensor_type` arguments.
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
if TYPE_CHECKING:
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"] # noqa: F821
class BatchFeature(UserDict):
    r"""
    Holds the output of the :meth:`~transformers.SequenceFeatureExtractor.pad` and feature extractor specific
    ``__call__`` methods.

    This class is derived from a python dictionary and can be used as a dictionary.

    Args:
        data (:obj:`dict`):
            Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask',
            etc.).
        tensor_type (:obj:`Union[None, str, TensorType]`, `optional`):
            You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
            initialization.
    """

    def __getitem__(self, item: str) -> Union[Any]:
        """
        If the key is a string, returns the value of the dict associated to :obj:`key` ('input_values',
        'attention_mask', etc.).
        """
        if isinstance(item, str):
            return self.data[item]
        else:
            # Integer/slice indexing is deliberately unsupported: entries are
            # addressed by feature name only.
            raise KeyError("Indexing with integers is not available when using Python based feature extractors")

    # Copied from transformers.tokenization_utils_base.BatchEncoding.keys
    # Copied from transformers.tokenization_utils_base.BatchEncoding.values
    # Copied from transformers.tokenization_utils_base.BatchEncoding.items
    # NOTE(review): the keys/values/items bodies referenced by the three
    # comments above are not present in this copy - presumably stripped;
    # UserDict still supplies the standard implementations. Confirm upstream.

    def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
        """
        Convert the inner content to tensors.

        Args:
            tensor_type (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
                The type of tensors to use. If :obj:`str`, should be one of the values of the enum
                :class:`~transformers.file_utils.TensorType`. If :obj:`None`, no modification is done.
        """
        if tensor_type is None:
            return self

        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework; the framework
        # modules are imported lazily so only the requested one is needed.
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            # Fallback: NumPy (TensorType.NUMPY or any unrecognized value).
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        for key, value in self.items():
            try:
                if not is_tensor(value):
                    tensor = as_tensor(value)
                    self[key] = tensor
            except:  # noqa E722
                # Any conversion failure (e.g. ragged lists) is re-raised as a
                # friendlier ValueError instead of the framework's own error.
                if key == "overflowing_values":
                    raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
                raise ValueError(
                    "Unable to create tensor, you should probably activate padding "
                    "with 'padding=True' to have batched tensors with the same length."
                )

        return self

    @torch_required
    # Copied from transformers.tokenization_utils_base.BatchEncoding.to with BatchEncoding->BatchFeature
    def to(self, device: Union[str, "torch.device"]) -> "BatchFeature":
        """
        Send all values to device by calling :obj:`v.to(device)` (PyTorch only).

        Args:
            device (:obj:`str` or :obj:`torch.device`): The device to put the tensors on.

        Returns:
            :class:`~transformers.BatchFeature`: The same instance after modification.
        """
        # This check catches things like APEX blindly calling "to" on all inputs to a module
        # Otherwise it passes the casts down and casts the LongTensor containing the token idxs
        # into a HalfTensor
        if isinstance(device, str) or _is_torch_device(device) or isinstance(device, int):
            self.data = {k: v.to(device=device) for k, v in self.data.items()}
        else:
            # Non-device argument (e.g. a dtype): refuse to cast, warn instead.
            logger.warning(f"Attempting to cast a BatchFeature to type {str(device)}. This is not supported.")
        return self
class FeatureExtractionMixin:
    """
    This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
    extractors.
    """
    def __init__(self, **kwargs):
        """Set elements of `kwargs` as attributes."""
        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                # Record which attribute rejected assignment before propagating.
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err
    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> PreTrainedFeatureExtractor:
        r"""
        Instantiate a type of :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` from a feature
        extractor, *e.g.* a derived class of :class:`~transformers.SequenceFeatureExtractor`.
        Args:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
                This can be either:
                - a string, the `model id` of a pretrained feature_extractor hosted inside a model repo on
                  huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or
                  namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing a feature extractor file saved using the
                  :func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.save_pretrained` method, e.g.,
                  ``./my_model_directory/``.
                - a path or url to a saved feature extractor JSON `file`, e.g.,
                  ``./my_model_directory/preprocessor_config.json``.
            cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
                Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
                standard cache should not be used.
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force to (re-)download the feature extractor files and override the cached versions
                if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received file. Attempts to resume the download if such a file
                exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If :obj:`False`, then this function returns just the final feature extractor object. If :obj:`True`,
                then this functions returns a :obj:`Tuple(feature_extractor, unused_kwargs)` where `unused_kwargs` is a
                dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the
                part of ``kwargs`` which has not been used to update ``feature_extractor`` and is otherwise ignored.
            kwargs (:obj:`Dict[str, Any]`, `optional`):
                The values in kwargs of any keys which are feature extractor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
                controlled by the ``return_unused_kwargs`` keyword parameter.
        .. note::
            Passing :obj:`use_auth_token=True` is required when you want to use a private model.
        Returns:
            A feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`.
        Examples::
            # We can't instantiate directly the base class `FeatureExtractionMixin` nor `SequenceFeatureExtractor` so let's show the examples on a
            # derived class: `Wav2Vec2FeatureExtractor`
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')    # Download feature_extraction_config from huggingface.co and cache.
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('./test/saved_model/')  # E.g. feature_extractor (or model) was saved using `save_pretrained('./test/saved_model/')`
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('./test/saved_model/preprocessor_config.json')
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h', return_attention_mask=False, foo=False)
            assert feature_extractor.return_attention_mask is False
            feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h', return_attention_mask=False,
                                                               foo=False, return_unused_kwargs=True)
            assert feature_extractor.return_attention_mask is False
            assert unused_kwargs == {'foo': False}
        """
        # Two-step load: resolve/download the JSON config into a plain dict,
        # then build the instance from it (leftover kwargs may override values).
        feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        return cls.from_dict(feature_extractor_dict, **kwargs)
    def save_pretrained(self, save_directory: Union[str, os.PathLike]):
        """
        Save a feature_extractor object to the directory ``save_directory``, so that it can be re-loaded using the
        :func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.from_pretrained` class method.
        Args:
            save_directory (:obj:`str` or :obj:`os.PathLike`):
                Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
        """
        if os.path.isfile(save_directory):
            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        # If we save using the predefined names, we can load using `from_pretrained`
        output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
        self.to_json_file(output_feature_extractor_file)
        logger.info(f"Configuration saved in {output_feature_extractor_file}")
    @classmethod
    def get_feature_extractor_dict(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
        From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a
        feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` using
        ``from_dict``.
        Parameters:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
        Returns:
            :obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor
            object.
        """
        # Pop the loading-control kwargs so that only attribute overrides
        # remain in `kwargs` for `from_dict` to consume.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        use_auth_token = kwargs.pop("use_auth_token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)
        user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline
        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True
        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        # Resolve the identifier: local directory, explicit file/URL, or hub model id.
        if os.path.isdir(pretrained_model_name_or_path):
            feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            feature_extractor_file = pretrained_model_name_or_path
        else:
            feature_extractor_file = hf_bucket_url(
                pretrained_model_name_or_path, filename=FEATURE_EXTRACTOR_NAME, revision=revision, mirror=None
            )
        try:
            # Load from URL or cache if already cached
            resolved_feature_extractor_file = cached_path(
                feature_extractor_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
            )
            # Load feature_extractor dict
            with open(resolved_feature_extractor_file, "r", encoding="utf-8") as reader:
                text = reader.read()
            feature_extractor_dict = json.loads(text)
        except EnvironmentError as err:
            logger.error(err)
            msg = (
                f"Can't load feature extractor for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {FEATURE_EXTRACTOR_NAME} file\n\n"
            )
            raise EnvironmentError(msg)
        except json.JSONDecodeError:
            # The file was fetched but is not valid JSON (e.g. an HTML error page).
            msg = (
                f"Couldn't reach server at '{feature_extractor_file}' to download feature extractor configuration file or "
                "feature extractor configuration file is not a valid JSON file. "
                f"Please check network or file content here: {resolved_feature_extractor_file}."
            )
            raise EnvironmentError(msg)
        if resolved_feature_extractor_file == feature_extractor_file:
            logger.info(f"loading feature extractor configuration file {feature_extractor_file}")
        else:
            logger.info(
                f"loading feature extractor configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}"
            )
        return feature_extractor_dict, kwargs
    @classmethod
    def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:
        """
        Instantiates a type of :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` from a Python
        dictionary of parameters.
        Args:
            feature_extractor_dict (:obj:`Dict[str, Any]`):
                Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
                retrieved from a pretrained checkpoint by leveraging the
                :func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.to_dict` method.
            kwargs (:obj:`Dict[str, Any]`):
                Additional parameters from which to initialize the feature extractor object.
        Returns:
            :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`: The feature extractor object
            instantiated from those parameters.
        """
        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
        feature_extractor = cls(**feature_extractor_dict)
        # Update feature_extractor with kwargs if needed
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(feature_extractor, key):
                setattr(feature_extractor, key, value)
                to_remove.append(key)
        # Consumed kwargs are removed so only the truly unused ones are returned.
        for key in to_remove:
            kwargs.pop(key, None)
        logger.info(f"Feature extractor {feature_extractor}")
        if return_unused_kwargs:
            return feature_extractor, kwargs
        else:
            return feature_extractor
    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.
        Returns:
            :obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this feature extractor instance.
        """
        output = copy.deepcopy(self.__dict__)
        # Record the concrete class so the right type can be re-instantiated on load.
        output["feature_extractor_type"] = self.__class__.__name__
        return output
    @classmethod
    def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:
        """
        Instantiates a feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`
        from the path to a JSON file of parameters.
        Args:
            json_file (:obj:`str` or :obj:`os.PathLike`):
                Path to the JSON file containing the parameters.
        Returns:
            A feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`: The
            feature_extractor object instantiated from that JSON file.
        """
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        feature_extractor_dict = json.loads(text)
        return cls(**feature_extractor_dict)
    def to_json_string(self) -> str:
        """
        Serializes this instance to a JSON string.
        Returns:
            :obj:`str`: String containing all the attributes that make up this feature_extractor instance in JSON
            format.
        """
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.
        Args:
            json_file_path (:obj:`str` or :obj:`os.PathLike`):
                Path to the JSON file in which this feature_extractor instance's parameters will be saved.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
| 44.226923 | 189 | 0.651709 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extraction saving/loading class for common feature extractors.
"""
import copy
import json
import os
from collections import UserDict
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import numpy as np
from transformers.file_utils import (
cached_path,
hf_bucket_url,
is_flax_available,
is_remote_url,
is_tf_available,
is_torch_available,
torch_required,
)
from transformers.utils import logging
from .file_utils import *
# Canonical file name under which a feature extractor's configuration is saved.
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
# Environment-variable spellings treated as "enabled".
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
# Snapshot of TRANSFORMERS_OFFLINE taken once at import time.
_is_offline_mode = True if os.environ.get("TRANSFORMERS_OFFLINE", "0").upper() in ENV_VARS_TRUE_VALUES else False
def is_offline_mode():
    """Return the offline-mode flag derived from ``TRANSFORMERS_OFFLINE`` at import time."""
    return bool(_is_offline_mode)
def _is_jax(x):
import jax.numpy as jnp # noqa: F811
return isinstance(x, jnp.ndarray)
def _is_numpy(x):
return isinstance(x, np.ndarray)
def _is_torch_device(x):
import torch
return isinstance(x, torch.device)
from enum import Enum
class ExplicitEnum(Enum):
    """Enum variant whose failed lookups name the accepted values."""
    @classmethod
    def _missing_(cls, value):
        # Invoked by the Enum machinery when `value` matches no member; fail
        # loudly with the list of valid values instead of the terse default.
        valid_values = list(cls._value2member_map_.keys())
        raise ValueError(f"{value} is not a valid {cls.__name__}, please select one of {valid_values}")
class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
    tab-completion in an IDE.
    """
    PYTORCH = "pt"  # torch.Tensor
    TENSORFLOW = "tf"  # tf.Tensor
    NUMPY = "np"  # numpy.ndarray
    JAX = "jax"  # jax.numpy array
# Import torch only for static type checkers; at runtime torch is imported
# lazily inside the functions/methods that need it.
if TYPE_CHECKING:
    if is_torch_available():
        import torch
logger = logging.get_logger(__name__)
# Alias naming the concrete feature-extractor class(es) this module produces.
PreTrainedFeatureExtractor = Union["SequenceFeatureExtractor"]  # noqa: F821
class BatchFeature(UserDict):
    r"""
    Holds the output of the :meth:`~transformers.SequenceFeatureExtractor.pad` and feature extractor specific
    ``__call__`` methods.
    This class is derived from a python dictionary and can be used as a dictionary.
    Args:
        data (:obj:`dict`):
            Dictionary of lists/arrays/tensors returned by the __call__/pad methods ('input_values', 'attention_mask',
            etc.).
        tensor_type (:obj:`Union[None, str, TensorType]`, `optional`):
            You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
            initialization.
    """
    def __init__(self, data: Optional[Dict[str, Any]] = None, tensor_type: Union[None, str, TensorType] = None):
        super().__init__(data)
        self.convert_to_tensors(tensor_type=tensor_type)
    def __getitem__(self, item: str) -> Union[Any]:
        """
        If the key is a string, returns the value of the dict associated to :obj:`key` ('input_values',
        'attention_mask', etc.).
        """
        if isinstance(item, str):
            return self.data[item]
        else:
            raise KeyError("Indexing with integers is not available when using Python based feature extractors")
    def __getattr__(self, item: str):
        # Attribute access falls back to dictionary lookup (e.g. ``batch.input_values``).
        try:
            return self.data[item]
        except KeyError:
            raise AttributeError
    def __getstate__(self):
        # Only `data` needs to survive pickling.
        return {"data": self.data}
    def __setstate__(self, state):
        if "data" in state:
            self.data = state["data"]
    # Copied from transformers.tokenization_utils_base.BatchEncoding.keys
    def keys(self):
        return self.data.keys()
    # Copied from transformers.tokenization_utils_base.BatchEncoding.values
    def values(self):
        return self.data.values()
    # Copied from transformers.tokenization_utils_base.BatchEncoding.items
    def items(self):
        return self.data.items()
    def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
        """
        Convert the inner content to tensors.
        Args:
            tensor_type (:obj:`str` or :class:`~transformers.file_utils.TensorType`, `optional`):
                The type of tensors to use. If :obj:`str`, should be one of the values of the enum
                :class:`~transformers.file_utils.TensorType`. If :obj:`None`, no modification is done.
        """
        if tensor_type is None:
            return self
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf
            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch
            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811
            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        for key, value in self.items():
            try:
                if not is_tensor(value):
                    tensor = as_tensor(value)
                    self[key] = tensor
            # Fix: was a bare ``except:``, which also caught BaseExceptions such as
            # KeyboardInterrupt/SystemExit and re-raised them as ValueError. Only
            # genuine conversion failures should be translated.
            except Exception:
                if key == "overflowing_values":
                    raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
                raise ValueError(
                    "Unable to create tensor, you should probably activate padding "
                    "with 'padding=True' to have batched tensors with the same length."
                )
        return self
    @torch_required
    # Copied from transformers.tokenization_utils_base.BatchEncoding.to with BatchEncoding->BatchFeature
    def to(self, device: Union[str, "torch.device"]) -> "BatchFeature":
        """
        Send all values to device by calling :obj:`v.to(device)` (PyTorch only).
        Args:
            device (:obj:`str` or :obj:`torch.device`): The device to put the tensors on.
        Returns:
            :class:`~transformers.BatchFeature`: The same instance after modification.
        """
        # This check catches things like APEX blindly calling "to" on all inputs to a module
        # Otherwise it passes the casts down and casts the LongTensor containing the token idxs
        # into a HalfTensor
        if isinstance(device, str) or _is_torch_device(device) or isinstance(device, int):
            self.data = {k: v.to(device=device) for k, v in self.data.items()}
        else:
            logger.warning(f"Attempting to cast a BatchFeature to type {str(device)}. This is not supported.")
        return self
class FeatureExtractionMixin:
    """
    This is a feature extraction mixin used to provide saving/loading functionality for sequential and image feature
    extractors.
    """
    def __init__(self, **kwargs):
        """Set elements of `kwargs` as attributes."""
        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                # Record which attribute rejected assignment before propagating.
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err
    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> PreTrainedFeatureExtractor:
        r"""
        Instantiate a type of :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` from a feature
        extractor, *e.g.* a derived class of :class:`~transformers.SequenceFeatureExtractor`.
        Args:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
                This can be either:
                - a string, the `model id` of a pretrained feature_extractor hosted inside a model repo on
                  huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or
                  namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing a feature extractor file saved using the
                  :func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.save_pretrained` method, e.g.,
                  ``./my_model_directory/``.
                - a path or url to a saved feature extractor JSON `file`, e.g.,
                  ``./my_model_directory/preprocessor_config.json``.
            cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
                Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
                standard cache should not be used.
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force to (re-)download the feature extractor files and override the cached versions
                if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received file. Attempts to resume the download if such a file
                exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If :obj:`False`, then this function returns just the final feature extractor object. If :obj:`True`,
                then this functions returns a :obj:`Tuple(feature_extractor, unused_kwargs)` where `unused_kwargs` is a
                dictionary consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the
                part of ``kwargs`` which has not been used to update ``feature_extractor`` and is otherwise ignored.
            kwargs (:obj:`Dict[str, Any]`, `optional`):
                The values in kwargs of any keys which are feature extractor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
                controlled by the ``return_unused_kwargs`` keyword parameter.
        .. note::
            Passing :obj:`use_auth_token=True` is required when you want to use a private model.
        Returns:
            A feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`.
        Examples::
            # We can't instantiate directly the base class `FeatureExtractionMixin` nor `SequenceFeatureExtractor` so let's show the examples on a
            # derived class: `Wav2Vec2FeatureExtractor`
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')    # Download feature_extraction_config from huggingface.co and cache.
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('./test/saved_model/')  # E.g. feature_extractor (or model) was saved using `save_pretrained('./test/saved_model/')`
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('./test/saved_model/preprocessor_config.json')
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h', return_attention_mask=False, foo=False)
            assert feature_extractor.return_attention_mask is False
            feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h', return_attention_mask=False,
                                                               foo=False, return_unused_kwargs=True)
            assert feature_extractor.return_attention_mask is False
            assert unused_kwargs == {'foo': False}
        """
        # Two-step load: resolve/download the JSON config into a plain dict,
        # then build the instance from it (leftover kwargs may override values).
        feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        return cls.from_dict(feature_extractor_dict, **kwargs)
    def save_pretrained(self, save_directory: Union[str, os.PathLike]):
        """
        Save a feature_extractor object to the directory ``save_directory``, so that it can be re-loaded using the
        :func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.from_pretrained` class method.
        Args:
            save_directory (:obj:`str` or :obj:`os.PathLike`):
                Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
        """
        if os.path.isfile(save_directory):
            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        # If we save using the predefined names, we can load using `from_pretrained`
        output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
        self.to_json_file(output_feature_extractor_file)
        logger.info(f"Configuration saved in {output_feature_extractor_file}")
    @classmethod
    def get_feature_extractor_dict(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
        From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a
        feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` using
        ``from_dict``.
        Parameters:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
        Returns:
            :obj:`Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor
            object.
        """
        # Pop the loading-control kwargs so that only attribute overrides
        # remain in `kwargs` for `from_dict` to consume.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        use_auth_token = kwargs.pop("use_auth_token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)
        user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline
        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True
        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        # Resolve the identifier: local directory, explicit file/URL, or hub model id.
        if os.path.isdir(pretrained_model_name_or_path):
            feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            feature_extractor_file = pretrained_model_name_or_path
        else:
            feature_extractor_file = hf_bucket_url(
                pretrained_model_name_or_path, filename=FEATURE_EXTRACTOR_NAME, revision=revision, mirror=None
            )
        try:
            # Load from URL or cache if already cached
            resolved_feature_extractor_file = cached_path(
                feature_extractor_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
            )
            # Load feature_extractor dict
            with open(resolved_feature_extractor_file, "r", encoding="utf-8") as reader:
                text = reader.read()
            feature_extractor_dict = json.loads(text)
        except EnvironmentError as err:
            logger.error(err)
            msg = (
                f"Can't load feature extractor for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {FEATURE_EXTRACTOR_NAME} file\n\n"
            )
            raise EnvironmentError(msg)
        except json.JSONDecodeError:
            # The file was fetched but is not valid JSON (e.g. an HTML error page).
            msg = (
                f"Couldn't reach server at '{feature_extractor_file}' to download feature extractor configuration file or "
                "feature extractor configuration file is not a valid JSON file. "
                f"Please check network or file content here: {resolved_feature_extractor_file}."
            )
            raise EnvironmentError(msg)
        if resolved_feature_extractor_file == feature_extractor_file:
            logger.info(f"loading feature extractor configuration file {feature_extractor_file}")
        else:
            logger.info(
                f"loading feature extractor configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}"
            )
        return feature_extractor_dict, kwargs
    @classmethod
    def from_dict(cls, feature_extractor_dict: Dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:
        """
        Instantiates a type of :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin` from a Python
        dictionary of parameters.
        Args:
            feature_extractor_dict (:obj:`Dict[str, Any]`):
                Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
                retrieved from a pretrained checkpoint by leveraging the
                :func:`~transformers.feature_extraction_utils.FeatureExtractionMixin.to_dict` method.
            kwargs (:obj:`Dict[str, Any]`):
                Additional parameters from which to initialize the feature extractor object.
        Returns:
            :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`: The feature extractor object
            instantiated from those parameters.
        """
        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
        feature_extractor = cls(**feature_extractor_dict)
        # Update feature_extractor with kwargs if needed
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(feature_extractor, key):
                setattr(feature_extractor, key, value)
                to_remove.append(key)
        # Consumed kwargs are removed so only the truly unused ones are returned.
        for key in to_remove:
            kwargs.pop(key, None)
        logger.info(f"Feature extractor {feature_extractor}")
        if return_unused_kwargs:
            return feature_extractor, kwargs
        else:
            return feature_extractor
    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.
        Returns:
            :obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this feature extractor instance.
        """
        output = copy.deepcopy(self.__dict__)
        # Record the concrete class so the right type can be re-instantiated on load.
        output["feature_extractor_type"] = self.__class__.__name__
        return output
    @classmethod
    def from_json_file(cls, json_file: Union[str, os.PathLike]) -> PreTrainedFeatureExtractor:
        """
        Instantiates a feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`
        from the path to a JSON file of parameters.
        Args:
            json_file (:obj:`str` or :obj:`os.PathLike`):
                Path to the JSON file containing the parameters.
        Returns:
            A feature extractor of type :class:`~transformers.feature_extraction_utils.FeatureExtractionMixin`: The
            feature_extractor object instantiated from that JSON file.
        """
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        feature_extractor_dict = json.loads(text)
        return cls(**feature_extractor_dict)
    def to_json_string(self) -> str:
        """
        Serializes this instance to a JSON string.
        Returns:
            :obj:`str`: String containing all the attributes that make up this feature_extractor instance in JSON
            format.
        """
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.
        Args:
            json_file_path (:obj:`str` or :obj:`os.PathLike`):
                Path to the JSON file in which this feature_extractor instance's parameters will be saved.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())
    def __repr__(self):
        # Debug-friendly representation: class name plus the full JSON config.
        return f"{self.__class__.__name__} {self.to_json_string()}"
781ab1b68d7175d472282dd2c34637ddada37ab7 | 23,855 | py | Python | tests/test_outputs_handler_matsim_xml_writer.py | arup-group/genet | 24bfbee31da6d7951598adb29ddf17d3a08ed5e6 | [
"MIT"
] | 22 | 2020-12-22T11:11:44.000Z | 2022-03-07T16:25:35.000Z | tests/test_outputs_handler_matsim_xml_writer.py | tkahng/genet | d5c29ed9e44408b60f55d8de889d7430debc9f04 | [
"MIT"
] | 27 | 2020-12-22T09:45:35.000Z | 2022-03-03T14:52:24.000Z | tests/test_outputs_handler_matsim_xml_writer.py | tkahng/genet | d5c29ed9e44408b60f55d8de889d7430debc9f04 | [
"MIT"
] | 7 | 2021-01-02T10:00:05.000Z | 2022-01-06T03:53:43.000Z | import os, sys
import pytest
import lxml
from copy import deepcopy
from shapely.geometry import LineString
from tests.fixtures import network_object_from_test_data, full_fat_default_config_path, assert_semantically_equal
from tests import xml_diff
from genet.outputs_handler import matsim_xml_writer
from genet.core import Network
from genet.schedule_elements import read_vehicle_types
from genet.inputs_handler import read
import xml.etree.cElementTree as ET
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| 51.634199 | 117 | 0.57686 | import os, sys
import pytest
import lxml
from copy import deepcopy
from shapely.geometry import LineString
from tests.fixtures import network_object_from_test_data, full_fat_default_config_path, assert_semantically_equal
from tests import xml_diff
from genet.outputs_handler import matsim_xml_writer
from genet.core import Network
from genet.schedule_elements import read_vehicle_types
from genet.inputs_handler import read
import xml.etree.cElementTree as ET
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture
def network_dtd():
dtd_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
"test_data", "dtd", "matsim", "network_v2.dtd"))
yield lxml.etree.DTD(dtd_path)
@pytest.fixture
def schedule_dtd():
dtd_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
"test_data", "dtd", "matsim", "transitSchedule_v2.dtd"))
yield lxml.etree.DTD(dtd_path)
@pytest.fixture
def vehicles_xsd():
xsd_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
"test_data", "dtd", "matsim", "vehicleDefinitions_v1.0.xsd"))
xml_schema_doc = lxml.etree.parse(xsd_path)
yield lxml.etree.XMLSchema(xml_schema_doc)
@pytest.fixture
def vehicle_types():
vehicle_types_config = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'genet',
"configs", "vehicles", "vehicle_definitions.yml"))
return read_vehicle_types(vehicle_types_config)
def test_generates_valid_matsim_network_xml_file(network_object_from_test_data, network_dtd, tmpdir):
matsim_xml_writer.write_matsim_network(tmpdir, network_object_from_test_data)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
def test_network_from_test_osm_data_produces_valid_matsim_network_xml_file(full_fat_default_config_path, network_dtd,
tmpdir):
osm_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "osm", "osm.xml"))
network = read.read_osm(osm_test_file, full_fat_default_config_path, 1, 'epsg:27700')
network.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
def test_network_with_extra_attribs_produces_valid_matsim_network_xml_file(tmpdir, network_dtd):
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
network.add_link('0', '0', '1', attribs={'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'extra_Special_attrib': 12})
network.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
_network_from_file = read.read_matsim(path_to_network=generated_network_file_path, epsg='epsg:27700')
assert_semantically_equal(dict(_network_from_file.nodes()), {
'0': {'id': '0', 'x': 1.0, 'y': 2.0, 'lon': -7.557148039524952, 'lat': 49.766825803756994,
's2_id': 5205973754090365183},
'1': {'id': '1', 'x': 2.0, 'y': 2.0, 'lon': -7.557134218911724, 'lat': 49.766826468710484,
's2_id': 5205973754090480551}})
assert_semantically_equal(dict(_network_from_file.links()), {
'0': {'id': '0', 'from': '0', 'to': '1', 'freespeed': 1.0, 'capacity': 20.0, 'permlanes': 1.0, 'oneway': '1',
'modes': {'car'}, 's2_from': 5205973754090365183, 's2_to': 5205973754090480551, 'length': 1.0}})
def test_tolerates_networks_with_no_oneway_flag_on_links(tmpdir, network_dtd):
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
network.add_link('0', '0', '1', attribs={
'id': '0',
'from': '0', 'to': '1',
'length': 1,
'freespeed': 1,
'capacity': 20,
'permlanes': 1,
'modes': ['car']
})
network.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
_network_from_file = read.read_matsim(path_to_network=generated_network_file_path, epsg='epsg:27700')
assert_semantically_equal(dict(_network_from_file.nodes()), {
'0': {'id': '0', 'x': 1.0, 'y': 2.0, 'lon': -7.557148039524952, 'lat': 49.766825803756994,
's2_id': 5205973754090365183},
'1': {'id': '1', 'x': 2.0, 'y': 2.0, 'lon': -7.557134218911724, 'lat': 49.766826468710484,
's2_id': 5205973754090480551}})
assert_semantically_equal(dict(_network_from_file.links()), {
'0': {
'id': '0',
'from': '0',
'to': '1',
'freespeed': 1.0,
'capacity': 20.0,
'permlanes': 1.0,
'modes': {'car'},
's2_from': 5205973754090365183,
's2_to': 5205973754090480551,
'length': 1.0
}
})
def test_network_with_attribs_doesnt_loose_any_attributes_after_saving(tmpdir):
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
network.add_link('0', '0', '1', attribs={'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'extra_Special_attrib': 12})
network.add_link('0', '0', '1', attribs={'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'attributes': {
'osm:way:lanes': {'name': 'osm:way:lanes',
'class': 'java.lang.String',
'text': '3'}}})
link_attributes = deepcopy(dict(network.links()))
node_attributes = deepcopy(dict(network.nodes()))
network.write_to_matsim(tmpdir)
link_attributes_post_save = dict(network.links())
node_attributes_post_save = dict(network.nodes())
assert_semantically_equal(link_attributes_post_save, link_attributes)
assert_semantically_equal(node_attributes_post_save, node_attributes)
def test_saving_network_with_geometry_doesnt_change_data_on_the_network(tmpdir):
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
network.add_link('0', '0', '1', attribs={'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1,2), (2,3), (3,4)]),
'extra_Special_attrib': 12})
network.add_link('0', '0', '1', attribs={'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1,2), (2,3), (3,4)]),
'attributes': {
'osm:way:lanes': {'name': 'osm:way:lanes',
'class': 'java.lang.String',
'text': '3'}}})
link_attributes = deepcopy(dict(network.links()))
node_attributes = deepcopy(dict(network.nodes()))
network.write_to_matsim(tmpdir)
link_attributes_post_save = dict(network.links())
node_attributes_post_save = dict(network.nodes())
assert_semantically_equal(link_attributes_post_save, link_attributes)
assert_semantically_equal(node_attributes_post_save, node_attributes)
def test_saving_network_with_geometry_produces_correct_polyline_in_link_attributes(tmpdir):
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
network.add_link('0', '0', '1', attribs={'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1,2), (2,3), (3,4)]),
'extra_Special_attrib': 12})
network.write_to_matsim(tmpdir)
found_geometry_attrib = False
for event, elem in ET.iterparse(os.path.join(tmpdir, 'network.xml'), events=('start', 'end')):
if event == 'start':
if elem.tag == 'attribute':
if elem.attrib['name'] == 'geometry':
assert elem.text == '_ibE_seK_ibE_ibE_ibE_ibE'
found_geometry_attrib = True
assert found_geometry_attrib
def test_saving_network_with_wrongly_formatted_attributes_with_geometry(tmpdir):
# attributes are assumed to be a nested dictionary of very specific format. Due to the fact that user can
# do virtually anything to edge attributes, or due to calculation error, this may not be the case. If it's not
# of correct format, we don't expect it to get saved to the matsim network.xml
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
link_attribs = {'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1,2), (2,3), (3,4)]),
'attributes': {'heyo': 'whoop'}
}
network.add_link('0', '0', '1', attribs=link_attribs)
network.write_to_matsim(tmpdir)
assert_semantically_equal(dict(network.links()), {'0': link_attribs})
assert_semantically_equal(matsim_xml_writer.check_link_attributes(link_attribs),
{'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1, 2), (2, 3), (3, 4)])
}
)
found_geometry_attrib = False
for event, elem in ET.iterparse(os.path.join(tmpdir, 'network.xml'), events=('start', 'end')):
if event == 'start':
if elem.tag == 'attribute':
if elem.attrib['name'] == 'geometry':
assert elem.text == '_ibE_seK_ibE_ibE_ibE_ibE'
found_geometry_attrib = True
assert found_geometry_attrib
def test_saving_network_with_bonkers_attributes_with_geometry(tmpdir):
# attributes are assumed to be a nested dictionary of very specific format. Due to the fact that user can
# do virtually anything to edge attributes, or due to calculation error, this may not be the case. If it's not
# of correct format, we don't expect it to get saved to the matsim network.xml
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
link_attribs = {'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1,2), (2,3), (3,4)]),
'attributes': float('nan')
}
network.add_link('0', '0', '1', attribs=link_attribs)
network.write_to_matsim(tmpdir)
assert_semantically_equal(dict(network.links()), {'0': link_attribs})
assert_semantically_equal(matsim_xml_writer.check_link_attributes(link_attribs),
{'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1, 2), (2, 3), (3, 4)])
}
)
found_geometry_attrib = False
for event, elem in ET.iterparse(os.path.join(tmpdir, 'network.xml'), events=('start', 'end')):
if event == 'start':
if elem.tag == 'attribute':
if elem.attrib['name'] == 'geometry':
assert elem.text == '_ibE_seK_ibE_ibE_ibE_ibE'
found_geometry_attrib = True
assert found_geometry_attrib
def test_saving_network_with_correct_attributes_and_geometry(tmpdir):
# attributes are assumed to be a nested dictionary of very specific format. Due to the fact that user can
# do virtually anything to edge attributes, or due to calculation error, this may not be the case. If it's not
# of correct format, we don't expect it to get saved to the matsim network.xml
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
link_attribs = {'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1,2), (2,3), (3,4)]),
'attributes': {
'osm:way:lanes': {'name': 'osm:way:lanes',
'class': 'java.lang.String',
'text': '3'}
}
}
network.add_link('0', '0', '1', attribs=link_attribs)
network.write_to_matsim(tmpdir)
assert_semantically_equal(dict(network.links()), {'0': link_attribs})
assert_semantically_equal(matsim_xml_writer.check_link_attributes(link_attribs), link_attribs)
found_geometry_attrib = False
for event, elem in ET.iterparse(os.path.join(tmpdir, 'network.xml'), events=('start', 'end')):
if event == 'start':
if elem.tag == 'attribute':
if elem.attrib['name'] == 'geometry':
assert elem.text == '_ibE_seK_ibE_ibE_ibE_ibE'
found_geometry_attrib = True
assert found_geometry_attrib
def test_saving_network_with_geometry_produces_polyline_if_link_already_has_other_attributes(tmpdir):
network = Network('epsg:27700')
network.add_node('0', attribs={'id': '0', 'x': 1, 'y': 2, 'lat': 1, 'lon': 2})
network.add_node('1', attribs={'id': '1', 'x': 2, 'y': 2, 'lat': 2, 'lon': 2})
network.add_link('0', '0', '1', attribs={'id': '0', 'from': '0', 'to': '1', 'length': 1, 'freespeed': 1,
'capacity': 20, 'permlanes': 1, 'oneway': '1', 'modes': ['car'],
'geometry': LineString([(1,2), (2,3), (3,4)]),
'attributes': {
'osm:way:lanes': {'name': 'osm:way:lanes',
'class': 'java.lang.String',
'text': '3'}}})
network.write_to_matsim(tmpdir)
found_geometry_attrib = False
for event, elem in ET.iterparse(os.path.join(tmpdir, 'network.xml'), events=('start', 'end')):
if event == 'start':
if elem.tag == 'attribute':
if elem.attrib['name'] == 'geometry':
assert elem.text == '_ibE_seK_ibE_ibE_ibE_ibE'
found_geometry_attrib = True
assert found_geometry_attrib
def test_write_matsim_network_produces_semantically_equal_xml_to_input_matsim_xml(network_object_from_test_data,
tmpdir):
matsim_xml_writer.write_matsim_network(tmpdir, network_object_from_test_data)
xml_diff.assert_semantically_equal(os.path.join(tmpdir, 'network.xml'), pt2matsim_network_test_file)
def test_generates_valid_matsim_schedule_xml_file(network_object_from_test_data, schedule_dtd, tmpdir):
matsim_xml_writer.write_matsim_schedule(tmpdir, network_object_from_test_data.schedule)
generated_file_path = os.path.join(tmpdir, 'schedule.xml')
xml_obj = lxml.etree.parse(generated_file_path)
assert schedule_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {} errors - first error {}' \
.format(generated_file_path,
len(schedule_dtd.error_log.filter_from_errors()),
schedule_dtd.error_log.filter_from_errors()[0])
def test_write_matsim_schedule_produces_semantically_equal_xml_to_input_matsim_xml(network_object_from_test_data,
tmpdir):
matsim_xml_writer.write_matsim_schedule(tmpdir, network_object_from_test_data.schedule)
xml_diff.assert_semantically_equal(os.path.join(tmpdir, 'schedule.xml'), pt2matsim_schedule_file)
def test_write_matsim_schedule_produces_semantically_equal_xml_to_input_matsim_xml_if_stops_need_to_reprojected(
network_object_from_test_data, tmpdir):
# we change all the stops in the one service and one route that exists in the test data
network_object_from_test_data.schedule.route('VJbd8660f05fe6f744e58a66ae12bd66acbca88b98').reproject('epsg:3035')
matsim_xml_writer.write_matsim_schedule(tmpdir, network_object_from_test_data.schedule)
xml_diff.assert_semantically_equal(os.path.join(tmpdir, 'schedule.xml'), pt2matsim_schedule_file)
def test_generates_valid_matsim_vehicles_xml_file(tmpdir, vehicles_xsd, vehicle_types):
vehicle_dict = {
'veh_1': {'type': 'bus'},
'veh_2': {'type': 'bus'},
'veh_3': {'type': 'bus'},
'veh_4': {'type': 'tram'},
'veh_5': {'type': 'rail'},
'veh_6': {'type': 'subway'}
}
matsim_xml_writer.write_vehicles(tmpdir, vehicle_dict, vehicle_types)
generated_file_path = os.path.join(tmpdir, 'vehicles.xml')
xml_obj = lxml.etree.parse(generated_file_path)
vehicles_xsd.assertValid(xml_obj)
def test_generates_matsim_vehicles_xml_file_containing_expected_vehicle_types(tmpdir, vehicle_types):
vehicle_dict = {
'veh_1': {'type': 'bus'},
'veh_2': {'type': 'bus'},
'veh_3': {'type': 'bus'},
'veh_4': {'type': 'tram'},
'veh_5': {'type': 'rail'},
'veh_6': {'type': 'subway'}
}
matsim_xml_writer.write_vehicles(tmpdir, vehicle_dict, vehicle_types)
generated_file_path = os.path.join(tmpdir, 'vehicles.xml')
xml_obj = lxml.etree.parse(generated_file_path)
vehicle_types = xml_obj.findall('{http://www.matsim.org/files/dtd}vehicleType')
expected_vehicle_types = {v['type'] for k,v in vehicle_dict.items()}
actual_vehicle_types = set()
for vehicle_type in vehicle_types:
actual_vehicle_types.add(vehicle_type.get('id'))
assert expected_vehicle_types == actual_vehicle_types
def test_generates_matsim_vehicles_xml_file_containing_expected_vehicles(tmpdir, vehicle_types):
vehicle_dict = {
'veh_1': {'type': 'bus'},
'veh_2': {'type': 'bus'},
'veh_3': {'type': 'bus'},
'veh_4': {'type': 'tram'},
'veh_5': {'type': 'rail'},
'veh_6': {'type': 'subway'}
}
matsim_xml_writer.write_vehicles(tmpdir, vehicle_dict, vehicle_types)
generated_file_path = os.path.join(tmpdir, 'vehicles.xml')
xml_obj = lxml.etree.parse(generated_file_path)
vehicles = xml_obj.findall('{http://www.matsim.org/files/dtd}vehicle')
assert len(vehicles) == len(vehicle_dict)
for vehicle in vehicles:
assert vehicle_dict[vehicle.get('id')]['type'] == vehicle.get('type')
def test_throws_exception_when_generating_vehicles_xml_from_unrecognised_vehicle_types(tmpdir, vehicle_types):
vehicle_dict = {
'veh_1': {'type': 'bus'},
'veh_4': {'type': 'tram'},
'veh_5': {'type': 'rocket ship'},
}
with pytest.raises(NotImplementedError) as e:
matsim_xml_writer.write_vehicles(tmpdir, vehicle_dict, vehicle_types)
assert 'No Vehicle Type info available for mode rocket ship' in str(e.value)
def test_write_matsim_vehicles_produces_semantically_equal_xml_to_input_matsim_xml(network_object_from_test_data,
tmpdir):
network = network_object_from_test_data
matsim_xml_writer.write_matsim_schedule(tmpdir, network.schedule)
matsim_xml_writer.write_vehicles(tmpdir, network.schedule.vehicles, network.schedule.vehicle_types)
xml_diff.assert_semantically_equal(os.path.join(tmpdir, 'vehicles.xml'), pt2matsim_vehicles_file)
| 22,287 | 0 | 548 |
6fc066cfaa85a48df43cc8e9329854aaebfcadd0 | 7,123 | py | Python | lesson04/likangwen/lession04_hw.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson04/likangwen/lession04_hw.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson04/likangwen/lession04_hw.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | import sys
import getpass
import json
from prettytable import PrettyTable
import pandas
import logging
import os.path
import time
# 定义变量
RESULT = {}
# USERINFO = ("admin", "123456")
USERINFO = ("a", "a")
FIELDS = ['name', 'age', 'tel', 'email']
# RESULT.append(FIELDS)
FORMAT = """
====================================================================
1.表字段格式
username age tel email
2. 增删改查和搜索
2.1 增 add # add monkey 12 132xxx monkey@51reboot.com
2.2 删 delete # delete monkey
2.3 改 update # update monkey set age = 18
2.4 查 list # list
2.5 搜 find # find monkey
2.6 分页 display # display page 1 pagesize 5
2.7 保存csv格式,可跟上名称,否则默认 # export csvname
2.8 帮助文档 # 'h' or 'help'
===================================================================
"""
# 日志函数
# 读取文件里的数据
# 持久化
# 添加用户函数
# 分页
# 保存为csv文件
# 删除用户函数
# 修改用户函数
# 打印成表格的函数
# 按需打印用户函数
# 查找用户函数
if __name__ == '__main__':
main() | 28.955285 | 106 | 0.491226 | import sys
import getpass
import json
from prettytable import PrettyTable
import pandas
import logging
import os.path
import time
# 定义变量
RESULT = {}
# USERINFO = ("admin", "123456")
USERINFO = ("a", "a")
FIELDS = ['name', 'age', 'tel', 'email']
# RESULT.append(FIELDS)
FORMAT = """
====================================================================
1.表字段格式
username age tel email
2. 增删改查和搜索
2.1 增 add # add monkey 12 132xxx monkey@51reboot.com
2.2 删 delete # delete monkey
2.3 改 update # update monkey set age = 18
2.4 查 list # list
2.5 搜 find # find monkey
2.6 分页 display # display page 1 pagesize 5
2.7 保存csv格式,可跟上名称,否则默认 # export csvname
2.8 帮助文档 # 'h' or 'help'
===================================================================
"""
# 日志函数
def User_log(msg):
logging.basicConfig(level=logging.DEBUG,
filename='./log.txt',
filemode='a',
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logging.debug(msg)
# 读取文件里的数据
def load():
fd = open('kw.txt', 'r')
# 异常处理如果文件里面不存在任何内容,那么什么都不做
try:
data = json.load(fd)
RESULT.update(data)
except Exception:
pass
# 持久化
def save():
fd = open('kw.txt', 'w')
fd.write(json.dumps(RESULT))
fd.close()
# 添加用户函数
def add(info_list):
USER_MSG = {}
if len(info_list) == 4:
username = info_list[0]
if username in RESULT:
print("用户已存在,请重新添加")
else:
RESULT[username] = dict(zip(FIELDS, info_list))
save()
User_log("user {} add success" .format(username))
print("添加用户成功")
else:
print("\033[0m请输入正确格式\033[0m {}".format("add monkey 12 132xxx monkey@51reboot.com"))
# 分页
def display(info_list):
if len(info_list) >= 2 and len(info_list) <= 4:
pagesize = 5
if len(info_list) == 2:
if info_list[0] == "page":
pagesize = 5
else:
print("请重新输入查询语句")
else:
if info_list[0] == "page" and info_list[2] == "pagesize":
pagesize = int(info_list[-1])
else:
print("请重新输入查询语句")
page = int(info_list[1])
data = []
for k, v in RESULT.items():
data.append(v.values())
end = page * pagesize
pretable(data[end-pagesize: end])
# 保存为csv文件
def export(info_list):
data_list = []
file_name = "kw"
if len(info_list):
file_name = info_list.pop(0)
if len(RESULT):
pt_fields = list(list(RESULT.values())[0].keys())
for u_k, u_v in RESULT.items():
data_list.append(list(u_v.values()))
pd = pandas.DataFrame(columns=pt_fields, data=data_list)
pd.to_csv('{}.csv'.format(file_name), encoding='utf_8_sig') # 防止中文乱码
else:
print("数据为空,请添加数据")
# 删除用户函数
def delete(info_list):
for u_k, u_v in RESULT.items():
if u_k == info_list[0]:
RESULT.pop(u_k)
save()
User_log("user {} delete success".format(u_k))
return "{}用户删除成功".format(info_list[0])
return "删除失败,用户列表查无{}此用户".format(info_list[0])
# 修改用户函数
def update(info_list):
for u_k, u_v in RESULT.items():
if u_k == info_list[0]:
try:
location_index = info_list.index("=") # 获取 = 在哪个位置
key_name = info_list[location_index - 1] # 获取要修改的参数,如age,username等
value_name = info_list[location_index + 1] # 获取要修改的参数的值
if key_name in FIELDS:
if key_name == "name":
RESULT[value_name] = RESULT.pop(u_k) #修改外层key(name)的名称,保持外层和里面的name名称是一致的
u_v[key_name] = value_name # 修改里层的字典对应的key的value
# 数据保存到文件
save()
return "{}用户修改成功".format(value_name)
else:
return "{},无此字段参数".format(key_name)
except Exception:
return "请输入正确的参数"
return "{}用户修改失败,用户列表查无此用户".format(info_list[0])
# 打印成表格的函数
def pretable(data):
if len(data):
if type(data) == dict:
pt_fields = list(list(data.values())[0].keys())
x = PrettyTable()
x.field_names = pt_fields
for u_k, u_v in data.items():
x.add_row(list(u_v.values()))
print(x)
elif type(data) == list:
x = PrettyTable()
x.field_names = FIELDS
for user_list in data:
x.add_row(user_list)
print(x)
else:
print("暂无数据,请添加数据。")
# 按需打印用户函数
def user_list(ret_dict=None):
if ret_dict:
pretable(ret_dict)
else:
pretable(RESULT)
# 查找用户函数
def find(info_list):
find_list = []
for u_k, u_v in RESULT.items():
# 判断查找的用户是否存在于列表里
if u_k == info_list[0]:
# return u_v
return {u_k:u_v}
print("用户列表查无{}此用户".format(info_list[0]))
def main():
INIT_FAIL_CNT = 0
MAX_FAIL_CNT = 6
while INIT_FAIL_CNT < MAX_FAIL_CNT:
username = input("Please input your username: ")
password = input("Please input your password: ")
# password = getpass.getpass("Please input your password: ")
if username == USERINFO[0] and password == USERINFO[1]:
# 提示增删改查操作
print("输入'h'或者 'help'查看帮助文档")
while True:
info = input("Please input your operation: ").lower()
if not info: # 当直接回车时不会报错
continue
info_list = info.split()
try: # 异常处理
action = info_list.pop(0)
except:
pass
if action == "add":
add(info_list)
elif action == "delete":
ret = delete(info_list)
print(ret)
elif action == "update":
ret = update(info_list)
print(ret)
elif action == "list":
user_list(info_list)
elif action == "find":
ret = find(info_list)
if ret:
user_list(ret)
elif action == "exit":
sys.exit(1)
elif action == "display":
ret = display(info_list)
if ret:
user_list(ret)
elif action == "export":
export(info_list)
elif action == "load":
load()
elif action.lower() == "h" or action.lower() == "help":
print(FORMAT)
else:
print("Syntax error")
print(FORMAT)
else:
print("账号或密码错误")
INIT_FAIL_CNT += 1
print("密码错误次数超过6次, 系统退出")
if __name__ == '__main__':
main() | 6,460 | 0 | 265 |
d33ec8e728c20478a449e26f8c58c3e1196d416e | 8,511 | py | Python | troposphere_mate/core/orchestration.py | tsuttsu305/troposphere_mate-project | 15ee94cc913efb32bc991979efcad943c992074c | [
"MIT"
] | 10 | 2019-07-08T14:52:16.000Z | 2021-10-15T22:18:22.000Z | troposphere_mate/core/orchestration.py | tsuttsu305/troposphere_mate-project | 15ee94cc913efb32bc991979efcad943c992074c | [
"MIT"
] | 1 | 2019-07-08T00:36:50.000Z | 2019-07-08T00:36:50.000Z | troposphere_mate/core/orchestration.py | tsuttsu305/troposphere_mate-project | 15ee94cc913efb32bc991979efcad943c992074c | [
"MIT"
] | 2 | 2020-03-22T14:44:54.000Z | 2020-08-05T02:08:01.000Z | # -*- coding: utf-8 -*-
"""
Implement a Orchestration Framework.
"""
try:
from typing import List, Tuple, Dict, Type
except:
pass
import attr
from collections import OrderedDict
from pathlib_mate import PathCls as Path
from .mate import AWSObject, Template
from .canned import Canned
def resolve_pipeline(plan):
"""
:type plan: List[Tuple[str, str]]
:param plan: [(can_id, tag), ...]
:rtype: List[Tuple[List[str], str]]]
"""
pipeline_change_set = list()
job = ([], None)
previous_env = None
for tier_name, tier_env in plan:
if tier_env != previous_env:
pipeline_change_set.append(job)
previous_env = tier_env
job = ([tier_name, ], tier_env)
else:
job[0].append(tier_name)
pipeline_change_set.append(job)
pipeline_change_set = pipeline_change_set[1:]
dct = dict()
pipeline = list()
for tier_list, tier_env in pipeline_change_set:
if tier_env in dct:
dct[tier_env].extend(tier_list)
else:
dct[tier_env] = tier_list
pipeline.append((list(dct[tier_env]), tier_env))
return pipeline
@attr.s
class CanLabel(object):
"""
A wrapper around a ``troposphere_mate.Canned``. It defines the metadata
about the ``Canned``
**中文文档**
在 ``Canned`` 之外的进一层包装. ``logic_id`` 是当 ``Canned`` 封装的 Template 会
被作为 Nested Stack 时起作用的. 因为 ``troposphere`` 实现的 Template 可能在其他
Template 中作为 ``AWS::CloudFormation::Stack`` Resource 使用. 作为
Nested Stack 是不知道 Master Stack 中的 Resource Logic Id 的. ``filename``
则是指定了实体文件的文件名. 因为 ``Template`` 本身只关注模板数据, 不关注模板文件.
CanLabel 实现了 Y 轴上的编排.
"""
logic_id = attr.ib() # type: str
can_class = attr.ib() # type: Type[Canned]
filename = attr.ib() # type: str
@attr.s
class ConfigData(object):
"""
**中文文档**
一串的 CanLabel (本质上是一串原子的 Nested Stack, 要么该 Stack 中的资源被全部
创建, 要么全部不被创建) 构成了一个架构的设计. 而这个架构的设计可能被部署到不同的环境中,
在不同的环境中, 配置数据可能不同, 实际被部署的 Nested Stack 的数量也可能不同.
ConfigData 提供了在不同环境下 (用 env_tag 做区分) 的配置数据.
ConfigData 实现了 X 轴上的编排.
"""
env_tag = attr.ib() # type: str
data = attr.ib() # type: dict
@attr.s
# ---
@attr.s
class TemplateFile(object):
"""
**中文文档**
包含了 ``troposphere_mate.Template`` 的实例 以及实际的文件路径 (绝对路径)
"""
template = attr.ib() # type: Template
filepath = attr.ib() # type: str
@filepath.validator
@attr.s
class ExecutionJob(object):
"""
**中文文档**
每个 ExecutionJob 对应一次 ``aws cloudformation deploy`` 命令的执行.
本质上一个 ExecutionJob 包含了一串最终的 Template 文件实体. 所以我们需要知道
Master Template 的路径, 以及所有的 Template 的数据以及路径.
"""
master_can = attr.ib() # type: Canned
master_template_path = attr.ib() # type: str
template_file_list = attr.ib() # type: List[TemplateFile]
class Orchestration(object):
"""
**中文文档**
Orchestration 的本质是对 CanLabel 和 ConfigData 进行编排. 使用:
``CanLabel.logic_id`` 和 ``ConfigData.env_tag`` 指定了编排中的某个最小单元,
通过指定云架构部署的顺序, 最终实现编排.
"""
def __init__(self,
master_canlabel_id,
canlabel_list,
config_data_list,
notes):
"""
:type master_canlabel_id: str
:type canlabel_list: List[CanLabel]
:type config_data_list: List[ConfigData]
:type notes: List[Note]
"""
self.master_canlabel_id = master_canlabel_id # type: str
self.canlabel_mapper = OrderedDict([
(canlabel.logic_id, canlabel)
for canlabel in canlabel_list
]) # type: Dict[str, CanLabel]
self.config_data_mapper = OrderedDict([
(config_data.env_tag, config_data)
for config_data in config_data_list
]) # type: Dict[str, ConfigData]
self.notes = notes # type: List[Note]
# print(self.canlabel_mapper[self.master_canlabel_id])
| 29.655052 | 99 | 0.602397 | # -*- coding: utf-8 -*-
"""
Implement a Orchestration Framework.
"""
try:
from typing import List, Tuple, Dict, Type
except:
pass
import attr
from collections import OrderedDict
from pathlib_mate import PathCls as Path
from .mate import AWSObject, Template
from .canned import Canned
def resolve_pipeline(plan):
"""
:type plan: List[Tuple[str, str]]
:param plan: [(can_id, tag), ...]
:rtype: List[Tuple[List[str], str]]]
"""
pipeline_change_set = list()
job = ([], None)
previous_env = None
for tier_name, tier_env in plan:
if tier_env != previous_env:
pipeline_change_set.append(job)
previous_env = tier_env
job = ([tier_name, ], tier_env)
else:
job[0].append(tier_name)
pipeline_change_set.append(job)
pipeline_change_set = pipeline_change_set[1:]
dct = dict()
pipeline = list()
for tier_list, tier_env in pipeline_change_set:
if tier_env in dct:
dct[tier_env].extend(tier_list)
else:
dct[tier_env] = tier_list
pipeline.append((list(dct[tier_env]), tier_env))
return pipeline
class ResourceFilter(object):
def __init__(self, allowed_stack_id_list):
self.allowed_stack_id_list = allowed_stack_id_list
def filter(self, resource, template):
"""
Check if we want to keep this resource in the cloudformation.
If ``True``, we keep it. if ``False`` we call
``Template.remove_resource(resource)`` to remove it,
:type resource: AWSObject
:type template: Template
:rtype: bool
"""
# if resource.
if resource.resource_type == "AWS::CloudFormation::Stack":
if resource.title in self.allowed_stack_id_list:
return True
else:
return False
else:
return True
@attr.s
class CanLabel(object):
    """
    A wrapper around a ``troposphere_mate.Canned``. It defines the metadata
    about the ``Canned``.

    ``logic_id`` matters when the Template wrapped by the ``Canned`` is used
    as a *nested stack*: a ``troposphere`` Template may appear inside another
    Template as an ``AWS::CloudFormation::Stack`` resource, and the nested
    stack itself does not know the resource logic id used by the master
    stack, so it is recorded here. ``filename`` names the physical template
    file, because a ``Template`` object only carries template data and knows
    nothing about files.

    CanLabel implements orchestration along the Y axis (the stack dimension).
    """
    logic_id = attr.ib()  # type: str
    can_class = attr.ib()  # type: Type[Canned]
    filename = attr.ib()  # type: str
@attr.s
class ConfigData(object):
    """
    A chain of CanLabels (essentially a chain of atomic nested stacks — the
    resources of each stack are either all created or none of them) makes up
    an architecture design. That design may be deployed to several
    environments, where both the configuration values and the set of nested
    stacks actually deployed can differ. ConfigData provides the
    configuration data for one such environment, identified by ``env_tag``.

    ConfigData implements orchestration along the X axis (the environment
    dimension).
    """
    env_tag = attr.ib()  # type: str
    data = attr.ib()  # type: dict
@attr.s
class Note(object):
    """One step of a deployment plan: deploy nested stack ``can_id`` to
    environment ``env_tag``. An ordered list of Notes is fed to
    ``resolve_pipeline`` / ``Orchestration``."""
    can_id = attr.ib()  # type: str
    env_tag = attr.ib()  # type: str
# ---
@attr.s
class TemplateFile(object):
    """
    Pairs a ``troposphere_mate.Template`` instance with the absolute path of
    the file it will be written to.
    """
    template = attr.ib()  # type: Template
    filepath = attr.ib()  # type: str

    @filepath.validator
    def check_filepath(self, attribute, value):
        # attrs validator: reject relative paths so every template lands at a
        # deterministic location regardless of the current working directory.
        if not Path(value).is_absolute():
            raise ValueError(
                "You have to use absolute path for 'TemplateFile.filepath`!")

    def make_file(self, json_or_yml="json"):
        """Serialize the template to ``filepath`` in JSON or YAML format."""
        self.template.to_file(self.filepath, json_or_yml=json_or_yml)
@attr.s
class ExecutionJob(object):
    """
    One ExecutionJob corresponds to one run of the
    ``aws cloudformation deploy`` command. It bundles the concrete template
    files of that run, so it needs the master template path plus the data and
    path of every template involved.
    """
    master_can = attr.ib()  # type: Canned
    master_template_path = attr.ib()  # type: str
    template_file_list = attr.ib()  # type: List[TemplateFile]

    def execute(self):
        """Dump the master can's config files and write every template file."""
        self.master_can.dump_shell_script_json_config_file()
        self.master_can.dump_cloudformation_json_config_file()
        for template_file in self.template_file_list:
            template_file.make_file(json_or_yml="json")
class Orchestration(object):
    """
    Orchestration arranges CanLabels against ConfigDatas. A pair of
    ``CanLabel.logic_id`` and ``ConfigData.env_tag`` addresses the smallest
    deployable unit; ordering those units (via a list of ``Note``) defines
    the deployment sequence of the whole cloud architecture.
    """
    def __init__(self,
                 master_canlabel_id,
                 canlabel_list,
                 config_data_list,
                 notes):
        """
        :type master_canlabel_id: str
        :type canlabel_list: List[CanLabel]
        :type config_data_list: List[ConfigData]
        :type notes: List[Note]
        """
        self.master_canlabel_id = master_canlabel_id  # type: str
        # logic_id -> CanLabel, insertion-ordered.
        self.canlabel_mapper = OrderedDict([
            (canlabel.logic_id, canlabel)
            for canlabel in canlabel_list
        ])  # type: Dict[str, CanLabel]
        # env_tag -> ConfigData, insertion-ordered.
        self.config_data_mapper = OrderedDict([
            (config_data.env_tag, config_data)
            for config_data in config_data_list
        ])  # type: Dict[str, ConfigData]
        self.notes = notes  # type: List[Note]
        # print(self.canlabel_mapper[self.master_canlabel_id])

    def plan(self, temp_dir):
        """Materialize one workspace directory per pipeline job under
        ``temp_dir`` and write the (filtered) master + nested templates into
        it. Returns the list of created workspace directories."""
        pipeline = resolve_pipeline([
            (note.can_id, note.env_tag)
            for note in self.notes
        ])
        nested_can_mapper = dict()  # type: Dict[str, Canned]
        returned_list = list()
        # NOTE(review): STOP_AT_IND is only referenced by the commented-out
        # early-exit below; it is currently unused.
        STOP_AT_IND = 4
        counter = 0
        for can_id_list, env_tag in pipeline:
            counter += 1
            # Workspace name e.g. "001-dev": zero-padded job index + env tag.
            deploy_workspace_dir = Path(
                temp_dir, "{}-{}".format(str(counter).zfill(3), env_tag))
            deploy_workspace_dir.mkdir(parents=True, exist_ok=True)
            returned_list.append(deploy_workspace_dir)
            template_file_list = list()
            config_data = self.config_data_mapper[env_tag].data
            master_can_label = self.canlabel_mapper[self.master_canlabel_id]
            # Instantiate the master can with this environment's config.
            master_can = master_can_label.can_class(**config_data)
            master_can.CONFIG_DIR = deploy_workspace_dir.abspath
            master_can.create_template()
            master_template_path = Path(
                deploy_workspace_dir, master_can_label.filename)
            template_file_list.append(
                TemplateFile(
                    template=master_can.template,
                    filepath=master_template_path,
                )
            )
            # construct resource filter
            # based on two
            # 1. The current execution job's ``CanLabel.logic_id`` (Nested Stack Resource Logic Id)
            # 2. Environment specified config data's ``TIER_LIST_TO_DEPLOY``
            allowed_stack_id_list = [
                resource_id
                for resource_id in can_id_list
                if resource_id in master_can.TIER_LIST_TO_DEPLOY.get_value()
            ]
            r_filter = ResourceFilter(allowed_stack_id_list)
            # Remove ignored stacks; iterate a list() copy because the dict is
            # mutated by remove_resource() during the loop.
            for resource_id, resource in list(master_can.template.resources.items()):
                keep_this_flag = r_filter.filter(resource, master_can.template)
                if not keep_this_flag:
                    master_can.template.remove_resource(resource)
                else:
                    # Kept nested stacks also get their own template file.
                    if resource_id in self.canlabel_mapper:
                        nested_canlabel = self.canlabel_mapper[resource_id]
                        nested_can = nested_canlabel.can_class(**config_data)
                        nested_can.create_template()
                        nested_can_mapper[resource_id] = nested_can
                        template_file = TemplateFile(
                            template=nested_can.template,
                            filepath=Path(deploy_workspace_dir,
                                          nested_canlabel.filename)
                        )
                        template_file_list.append(template_file)
            # construct ExecutionJob
            print("=" * 10)
            print(can_id_list, env_tag)
            master_can.dump_cloudformation_json_config_file()
            for template_file in template_file_list:
                template_file.make_file(json_or_yml="json")
            # break
            # if STOP_AT_IND == counter:
            #     break
        return returned_list
| 3,727 | 710 | 152 |
07e38744e543d0958ff29a1c922e0df64b4d8e9b | 2,674 | py | Python | tracker.py | ramondfdez/kalman-tracking | f9439dab0d9daed505e6fd3f0c8a86f47279ee6c | [
"MIT"
] | null | null | null | tracker.py | ramondfdez/kalman-tracking | f9439dab0d9daed505e6fd3f0c8a86f47279ee6c | [
"MIT"
] | null | null | null | tracker.py | ramondfdez/kalman-tracking | f9439dab0d9daed505e6fd3f0c8a86f47279ee6c | [
"MIT"
] | null | null | null | import numpy as np
from kalmanFilter import KalmanFilter
from scipy.optimize import linear_sum_assignment
from collections import deque
class Tracks(object):
    """A single tracked target. (Method bodies are stripped in this filtered copy.)"""
class Tracker(object):
    """Manager for a collection of Tracks. (Method bodies are stripped in this filtered copy.)"""
| 25.961165 | 87 | 0.663426 | import numpy as np
from kalmanFilter import KalmanFilter
from scipy.optimize import linear_sum_assignment
from collections import deque
class Tracks(object):
    """One tracked target: a Kalman filter plus bookkeeping (trace, id, miss count)."""
    def __init__(self, detection, trackId):
        """
        :param detection: initial 2-D measurement for this target
        :param trackId: unique integer id assigned by the Tracker
        """
        super(Tracks, self).__init__()
        self.KF = KalmanFilter()
        # Prime the filter with one predict/correct cycle on the first detection.
        self.KF.predict()
        self.KF.correct(np.matrix(detection).reshape(2,1))
        # Recent predicted positions (bounded history kept for the Tracker).
        self.trace = deque(maxlen=20)
        self.prediction = detection.reshape(1,2)
        self.trackId = trackId
        # Consecutive frames without a matched detection.
        self.skipped_frames = 0
    def predict(self,detection):
        """Advance the filter one step, then correct it with `detection`."""
        self.prediction = np.array(self.KF.predict()).reshape(1,2)
        self.KF.correct(np.matrix(detection).reshape(2,1))
class Tracker(object):
    """Multi-object tracker: associates detections with Kalman-filtered tracks.

    Association is solved globally with the Hungarian algorithm
    (``scipy.optimize.linear_sum_assignment``) on a scaled Euclidean-distance
    cost matrix between each track's prediction and every detection.
    """

    def __init__(self, dist_threshold, max_frame_skipped, max_trace_length):
        """
        :param dist_threshold: maximum (scaled) distance for a detection to be
            accepted as a match for a track.
        :param max_frame_skipped: consecutive unmatched frames after which a
            track is discarded.
        :param max_trace_length: kept for API compatibility; the per-track
            trace length is currently fixed inside Tracks itself.
        """
        super(Tracker, self).__init__()
        self.dist_threshold = dist_threshold
        self.max_frame_skipped = max_frame_skipped
        self.max_trace_length = max_trace_length
        self.trackId = 0
        self.tracks = []

    def update(self, detections):
        """Associate ``detections`` (array of 2-D points) with existing tracks,
        create tracks for unmatched detections and drop stale tracks."""
        # Bootstrap: with no existing tracks, every detection starts one.
        if len(self.tracks) == 0:
            for i in range(detections.shape[0]):
                track = Tracks(detections[i], self.trackId)
                self.trackId += 1
                self.tracks.append(track)

        # Cost matrix: distance from each track's prediction to each detection.
        N = len(self.tracks)
        cost = []
        for i in range(N):
            diff = np.linalg.norm(self.tracks[i].prediction - detections.reshape(-1, 2), axis=1)
            cost.append(diff)
        cost = np.array(cost) * 0.1

        # Optimal one-to-one assignment (Hungarian algorithm).
        row, col = linear_sum_assignment(cost)
        assignment = [-1] * N
        for i in range(len(row)):
            assignment[row[i]] = col[i]

        # Reject matches that are too far away; count misses for the rest.
        for i in range(len(assignment)):
            if assignment[i] != -1:
                if cost[i][assignment[i]] > self.dist_threshold:
                    assignment[i] = -1
            else:
                self.tracks[i].skipped_frames += 1

        # Drop stale tracks. Delete in descending index order so earlier
        # indices stay valid (the original deleted in ascending order, which
        # shifted the list and removed the wrong tracks).
        del_tracks = [i for i in range(len(self.tracks))
                      if self.tracks[i].skipped_frames > self.max_frame_skipped]
        for idx in sorted(del_tracks, reverse=True):
            del self.tracks[idx]
            del assignment[idx]

        # Start a new track for every detection left unassigned.
        for i in range(len(detections)):
            if i not in assignment:
                track = Tracks(detections[i], self.trackId)
                self.trackId += 1
                self.tracks.append(track)

        # Update matched tracks with their detection and record the trace.
        for i in range(len(assignment)):
            if assignment[i] != -1:
                self.tracks[i].skipped_frames = 0
                self.tracks[i].predict(detections[assignment[i]])
                self.tracks[i].trace.append(self.tracks[i].prediction)
| 2,295 | 0 | 100 |
7a9bead17f4f87fe28b94db3b6288db94bc69e37 | 939 | py | Python | ballir_dicom_manager/file_readers/read_nifti.py | bashirlab/BaLLIR_DICOM_Manager | b24aab9eb6e551bbc684d78cf578e446a9acd526 | [
"MIT"
] | null | null | null | ballir_dicom_manager/file_readers/read_nifti.py | bashirlab/BaLLIR_DICOM_Manager | b24aab9eb6e551bbc684d78cf578e446a9acd526 | [
"MIT"
] | null | null | null | ballir_dicom_manager/file_readers/read_nifti.py | bashirlab/BaLLIR_DICOM_Manager | b24aab9eb6e551bbc684d78cf578e446a9acd526 | [
"MIT"
] | null | null | null | import pathlib
import numpy as np
from ballir_dicom_manager.file_readers.read_image_volume import ReadImageVolume
from ballir_dicom_manager.file_loaders.nifti_loader import NiftiLoader
from ballir_dicom_manager.file_viewers.array_viewer import ArrayViewer
| 34.777778 | 80 | 0.70181 | import pathlib
import numpy as np
from ballir_dicom_manager.file_readers.read_image_volume import ReadImageVolume
from ballir_dicom_manager.file_loaders.nifti_loader import NiftiLoader
from ballir_dicom_manager.file_viewers.array_viewer import ArrayViewer
class ReadNifti(ReadImageVolume):
    """Reader for NIfTI image volumes: loads the file, extracts voxel spacing
    and exposes the (optionally clipped) array plus a viewer."""
    # Class-level loader shared by all instances; ReadImageVolume presumably
    # uses it to populate self.files — TODO confirm against the base class.
    loader = NiftiLoader()

    def __init__(self, target_path: pathlib.Path, value_clip=False):
        """
        :param target_path: path of the NIfTI file to read
        :param value_clip: optional (low, high) pair used to clip voxel values
        """
        super().__init__(target_path)
        # self.files = self.sorter.sort_dicom_files(self.files)
        # self.validator.validate(self.files)
        self.value_clip = value_clip
        # Voxel spacing from the NIfTI header of the first loaded file.
        self.spacing = self.files[0].header.get_zooms()
        self.set_arr()

    def set_arr(self):
        """Build self.arr from the first file's data and attach a viewer."""
        # Rotate 90° in the first two axes to reorient the volume for display.
        self.arr = np.rot90(self.files[0].get_fdata(), k=1, axes=(0, 1))
        if self.value_clip:
            self.arr = np.clip(self.arr, self.value_clip[0], self.value_clip[1])
        self.viewer = ArrayViewer(self.arr, self.spacing)
c14cc8c968eafae9faee4ceccaee3b5b93fb10da | 4,863 | py | Python | findfaceApp/findeyes/frol.py | WilShi/unity_tool | 14df85aec6f2c889a3c7bf4f88d6632029f96bef | [
"MIT"
] | null | null | null | findfaceApp/findeyes/frol.py | WilShi/unity_tool | 14df85aec6f2c889a3c7bf4f88d6632029f96bef | [
"MIT"
] | null | null | null | findfaceApp/findeyes/frol.py | WilShi/unity_tool | 14df85aec6f2c889a3c7bf4f88d6632029f96bef | [
"MIT"
] | null | null | null |
import datetime
from sys import argv
import tensorflow as tf
import os
import glob
from skimage import io
import matplotlib.pyplot as plt
from multiprocessing import Process
from PIL import Image, ImageDraw
from pathlib import Path
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox
import qdarkstyle
import numpy as np
from keras.models import Model, load_model
# Entry point: run the face-image cleaning pipeline on the directory given as argv[1].
# CLI usage: python frol.py <image_directory>
if __name__ == "__main__":
    Frol().startfind(argv[1])
| 28.946429 | 127 | 0.544931 |
import datetime
from sys import argv
import tensorflow as tf
import os
import glob
from skimage import io
import matplotlib.pyplot as plt
from multiprocessing import Process
from PIL import Image, ImageDraw
from pathlib import Path
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox
import qdarkstyle
import numpy as np
from keras.models import Model, load_model
class readfile():
    """Recursively collect file paths under a directory and provide small
    path-string helpers (normalization, last component, sub-path)."""

    def __init__(self) -> None:
        # Accumulates every file path discovered by allfile().
        self.files = []

    def allfile(self, path) -> None:
        """Walk *path* recursively, appending every non-directory entry to
        ``self.files``; a non-directory *path* is appended directly."""
        if not os.path.isdir(path):
            self.files.append(path)
            return
        for entry in os.listdir(path):
            child = path + '/' + entry
            if os.path.isdir(child):
                self.allfile(child)
            else:
                self.files.append(child)

    def listfiles(self, path) -> list:
        """Normalize *path*, walk it, and return the accumulated file list."""
        normalized = self.format_path(path)
        self.allfile(normalized)
        return self.files

    def format_path(self, path) -> str:
        """Return *path* as an absolute, forward-slash path with no trailing slash."""
        absolute = os.path.abspath(path).replace('\\', '/').replace('//', '/')
        return absolute[:-1] if absolute.endswith('/') else absolute

    def last_path(self, path) -> str:
        """Return the component after the final '/' (the whole string if none)."""
        return path[path.rfind('/') + 1:]

    def sub_path(self, path, rootpath) -> str:
        """Return the part of *path* after the first occurrence of *rootpath*,
        without a leading '/'."""
        remainder = path[path.find(rootpath) + len(rootpath):]
        return remainder[1:] if remainder[0] == '/' else remainder
class Frol():
    """Face-image cleaner: scores each image with a Keras FCN highlight model
    and copies it to a pass/fail directory, fanned out over 6 processes."""

    def forl(self, path, pass_dir, fail_dir):
        """Score one image and save it (as RGB) into pass_dir or fail_dir.

        The model predicts a specular/highlight mask; the image passes when
        at least 6% of the mask pixels exceed the 0.4 threshold.
        """
        # The model file may sit next to the script or under ./findeyes/.
        try:
            model = load_model('FCN_baseline.h5')
        except:
            model = load_model('./findeyes/FCN_baseline.h5')
        # img = io.imread(r'C:\\Users\\cn-wilsonshi\\Downloads\\checkfrol\\frol\\GlassCol00128.jpg')
        print(path)
        img = io.imread(path)
        # Normalize to [0, 1] and add a batch dimension for the model.
        img = img.astype('float') / 255.0
        img = np.expand_dims(img, axis=0)
        specular_mask = model.predict(img)
        # Binarize the predicted mask at threshold 0.4.
        th = 0.4
        specular_mask[specular_mask > th] = 1.0
        specular_mask[specular_mask <= th] = 0
        # Fraction (percent) of mask pixels flagged as highlight.
        totalpex = len(specular_mask[specular_mask >= 0])
        hilightpex = len(specular_mask[specular_mask == 1.0])
        rate = round(hilightpex/totalpex, 2)*100
        # return rate
        print("++"*50)
        print(f"{hilightpex} / {totalpex} : {rate} %")
        if rate >= 6:
            if not os.path.exists(pass_dir): os.makedirs(pass_dir)
            Image.open(path).convert('RGB').save(f"{pass_dir}{readfile().last_path(path)}")
        else:
            if not os.path.exists(fail_dir): os.makedirs(fail_dir)
            Image.open(path).convert('RGB').save(f"{fail_dir}{readfile().last_path(path)}")
        # plt.subplot(1,2,1)
        # plt.imshow(img[0, :,:,:])
        # plt.subplot(1,2,2)
        # plt.imshow(specular_mask[0, :,:,0], cmap='gray')
        # plt.show()

    def multforl(self, path, pass_dir, fail_dir):
        """Worker loop: run forl() over a list of image paths."""
        for i in path:
            self.forl(i, pass_dir, fail_dir)

    def startfind(self, path):
        """Collect all files under *path*, split them into 6 chunks and clean
        them in 6 parallel processes, then report timing in a message box."""
        start = datetime.datetime.now()
        paths = readfile().listfiles(path)
        print("find file: ", len(paths))
        length = len(paths)
        # Output directories inside the user's Downloads folder.
        fail_dir = '{}/Downloads/fail/'.format(str(Path.home()))
        pass_dir_cv = '{}/Downloads/pass/'.format(str(Path.home()))
        # Split the path list into six roughly equal slices, one per process.
        p1 = []
        p2 = []
        p3 = []
        p4 = []
        p5 = []
        p6 = []
        for i in range(length):
            if i < round(length/6):
                p1.append(paths[i])
            elif i >= round(length/6) and i <(2*round(length/6)):
                p2.append(paths[i])
            elif i >= (2*round(length/6)) and i < (3*round(length/6)):
                p3.append(paths[i])
            elif i >= (3*round(length/6)) and i < (4*round(length/6)):
                p4.append(paths[i])
            elif i >= (4*round(length/6)) and i < (5*round(length/6)):
                p5.append(paths[i])
            else:
                p6.append(paths[i])
        multp = [p1,p2,p3,p4,p5,p6]
        process_list = []
        for i in multp:
            print("开始运行")
            p = Process(target=self.multforl,args=(i,pass_dir_cv,fail_dir,))
            p.start()
            process_list.append(p)
        # Wait for all workers before reporting.
        for p in process_list:
            p.join()
        end = datetime.datetime.now()
        print(f"总图片:{length} 张 {'*'*10} 用时:{(end - start).seconds} 秒 {'*'*10} 每秒:{round(length/int((end - start).seconds))} 张")
        app = QApplication([])
        self.Tips("人脸图片清洗已结束\n文件保存在下载文件夹中")

    # Show a small themed message box.
    def Tips(self, message):
        window = QWidget()
        window.setWindowOpacity(0.9)  # set window transparency
        window.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())  # apply dark theme
        QMessageBox.about(window, "提示", message)
# CLI usage: python frol.py <image_directory>
if __name__ == "__main__":
    Frol().startfind(argv[1])
| 4,203 | -12 | 314 |
d6ccd34a5a30b7522fa57f541100cb9301ffad9e | 1,971 | py | Python | pixeldrain.py | FayasNoushad/Pixeldrain | e8d620986f3d186b6c472b75db8bd3dad58bf4b5 | [
"MIT"
] | 5 | 2021-11-28T12:03:12.000Z | 2021-12-29T03:14:06.000Z | pixeldrain.py | FayasNoushad/Pixeldrain | e8d620986f3d186b6c472b75db8bd3dad58bf4b5 | [
"MIT"
] | null | null | null | pixeldrain.py | FayasNoushad/Pixeldrain | e8d620986f3d186b6c472b75db8bd3dad58bf4b5 | [
"MIT"
] | 1 | 2021-12-21T02:42:41.000Z | 2021-12-21T02:42:41.000Z | import requests
def upload_file(file):
    """
    Upload a file to pixeldrain anonymously.

    :param file: path of the local file to upload
    :return: decoded JSON response from the pixeldrain API
    """
    # Context manager closes the handle even if the request raises
    # (the original left the file handle open).
    with open(file, "rb") as handle:
        response = requests.post(
            "https://pixeldrain.com/api/file",
            data={"anonymous": True},
            files={"file": handle},
        )
    return response.json()


def file(file_id):
    """
    Return the direct download link for the given pixeldrain file id.
    """
    return "https://pixeldrain.com/api/file/" + file_id


def download_file(file_id, file_name):
    """
    Download the full file associated with the ID into ``file_name``.
    Supports byte range requests.

    :return: the local file name that was written
    """
    response = requests.get(file(file_id))
    # Named 'handle' so the module-level file() helper is not shadowed.
    with open(file_name, "wb") as handle:
        handle.write(response.content)
    return file_name


def info(file_id):
    """
    Return information about one or more files.
    A comma separated list of file IDs in the URL yields an array of file
    info objects instead of a single object.
    """
    response = requests.get(f"https://pixeldrain.com/api/file/{file_id}/info")
    return response.json()


def thumbnail(file_id, width="", height=""):
    """
    Fetch a PNG thumbnail image representing the file (128x128 px default).

    width and height are optional and may be str or int; the API requires
    multiples of 16, i.e. 16, 32, 48, 64, 80, 96, 112 or 128. If a thumbnail
    cannot be generated you are redirected to a 128x128 px mime type image.
    """
    # Accept ints as well as strings (the original crashed concatenating ints).
    width = str(width) if width else ""
    height = str(height) if height else ""
    api = f"https://pixeldrain.com/api/file/{file_id}/thumbnail"
    api += "?" if width or height else ""
    api += "width=" + width if width else ""
    api += "&" if width and height else ""
    api += "height=" + height if height else ""
    return requests.get(api)
| 28.157143 | 136 | 0.646372 | import requests
def upload_file(file):
"""
Upload a file to pixeldrain
upload_file(file)
"""
response = requests.post(
"https://pixeldrain.com/api/file",
data={"anonymous": True},
files={"file": open(file, "rb")}
)
return response.json()
def file(file_id):
"""
Returns direct file link
file(file_id)
"""
return "https://pixeldrain.com/api/file/"+file_id
def download_file(file_id, file_name):
"""
Download the full file associated with the ID.
Supports byte range requests.
download_file(file_id, file_name)
"""
response = requests.get(file(file_id))
with open(file_name, "wb") as file:
file.write(response.content)
return file_name
def info(file_id):
"""
Returns information about one or more files.
You can also put a comma separated list of file IDs in the URL and it will return an array of file info, instead of a single object.
info(file_id)
"""
info = requests.get(f"https://pixeldrain.com/api/file/{file_id}/info")
return info.json()
def thumbnail(file_id, width="", height=""):
"""
Returns a PNG thumbnail image representing the file.
The thumbnail image will be 128x128 px by default.
You can specify the width and height with parameters in the URL.
The width and height parameters need to be a multiple of 16.
So the allowed values are 16, 32, 48, 64, 80, 96, 112 and 128.
If a thumbnail cannot be generated for the file you will be redirected to a mime type image of 128x128 px.
thumbnail(file_id, width, height)
width and height is optional
"""
api = f"https://pixeldrain.com/api/file/{file_id}/thumbnail"
api += "?" if width or height else ""
api += "width=" + width if width else ""
api += "&" if width and height else ""
api += "height=" + height if height else ""
thumbnail = requests.get(api)
return thumbnail
| 0 | 0 | 0 |
72b3cfe0b7ace9709b11fe2908ccaeabc29f1262 | 1,359 | py | Python | 08.Regression/8.2.LinearRegression_CV.py | radiumweilei/chinahadoop-ml-2 | ea886610a6ccb278afeff759bf2dc8a30ef3f275 | [
"Apache-2.0"
] | null | null | null | 08.Regression/8.2.LinearRegression_CV.py | radiumweilei/chinahadoop-ml-2 | ea886610a6ccb278afeff759bf2dc8a30ef3f275 | [
"Apache-2.0"
] | null | null | null | 08.Regression/8.2.LinearRegression_CV.py | radiumweilei/chinahadoop-ml-2 | ea886610a6ccb278afeff759bf2dc8a30ef3f275 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso, Ridge, ElasticNet
from sklearn.model_selection import GridSearchCV
if __name__ == "__main__":
    # Load the advertising dataset with pandas: TV, Radio, Newspaper, Sales.
    data = pd.read_csv('8.Advertising.csv')    # TV、Radio、Newspaper、Sales
    x = data[['TV', 'Radio', 'Newspaper']]
    # x = data[['TV', 'Radio']]
    y = data['Sales']
    print(x)
    print(y)
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
    # print x_train, y_train
    model = Lasso()
    # model = Ridge()
    alpha_can = np.logspace(-3, 2, 10)  # candidate alpha values for cross-validation
    lasso_model = GridSearchCV(model, param_grid={'alpha': alpha_can}, cv=5)  # cv=5: 5-fold cross-validation
    # NOTE(review): the grid search is fit on the FULL dataset (x, y) even
    # though a train/test split was made above, so the test-set metrics
    # below are optimistic (test rows leak into model selection).
    lasso_model.fit(x, y)
    print('验证参数:\n', lasso_model.best_params_)  # best hyper-parameter found by CV
    y_hat = lasso_model.predict(np.array(x_test))
    print(lasso_model.score(x_test, y_test))
    mse = np.average((y_hat - np.array(y_test)) ** 2)  # Mean Squared Error
    rmse = np.sqrt(mse)  # Root Mean Squared Error
    print(mse, rmse)
    # Plot test targets vs. predictions over the sample index.
    t = np.arange(len(x_test))
    plt.plot(t, y_test, 'r-', linewidth=2, label='Test')
    plt.plot(t, y_hat, 'g-', linewidth=2, label='Predict')
    plt.legend(loc='upper right')
    plt.grid()
    plt.show()
| 32.357143 | 98 | 0.658572 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso, Ridge, ElasticNet
from sklearn.model_selection import GridSearchCV
if __name__ == "__main__":
    # Load the advertising dataset with pandas: TV, Radio, Newspaper, Sales.
    data = pd.read_csv('8.Advertising.csv')    # TV、Radio、Newspaper、Sales
    x = data[['TV', 'Radio', 'Newspaper']]
    # x = data[['TV', 'Radio']]
    y = data['Sales']
    print(x)
    print(y)
    # Hold out a test set so the reported metrics reflect unseen data.
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
    model = Lasso()
    # model = Ridge()
    alpha_can = np.logspace(-3, 2, 10)  # candidate alpha values for cross-validation
    lasso_model = GridSearchCV(model, param_grid={'alpha': alpha_can}, cv=5)  # cv=5: 5-fold cross-validation
    # Fix: fit on the training split only. The original fit on (x, y), which
    # leaked the held-out test rows into model selection and made the
    # evaluation below optimistic.
    lasso_model.fit(x_train, y_train)
    print('验证参数:\n', lasso_model.best_params_)  # best hyper-parameter found by CV
    y_hat = lasso_model.predict(np.array(x_test))
    print(lasso_model.score(x_test, y_test))
    mse = np.average((y_hat - np.array(y_test)) ** 2)  # Mean Squared Error
    rmse = np.sqrt(mse)  # Root Mean Squared Error
    print(mse, rmse)
    # Plot test targets vs. predictions over the sample index.
    t = np.arange(len(x_test))
    plt.plot(t, y_test, 'r-', linewidth=2, label='Test')
    plt.plot(t, y_hat, 'g-', linewidth=2, label='Predict')
    plt.legend(loc='upper right')
    plt.grid()
    plt.show()
| 0 | 0 | 0 |
549b9bd68f1c4944ff9d770440583ba0a6c1005e | 1,967 | py | Python | lbrynet/daemon/auth/factory.py | vyaspranjal33/lbry | e03e41ad3105ccc0d8d8891b0e9fa63f9bbfce34 | [
"MIT"
] | null | null | null | lbrynet/daemon/auth/factory.py | vyaspranjal33/lbry | e03e41ad3105ccc0d8d8891b0e9fa63f9bbfce34 | [
"MIT"
] | null | null | null | lbrynet/daemon/auth/factory.py | vyaspranjal33/lbry | e03e41ad3105ccc0d8d8891b0e9fa63f9bbfce34 | [
"MIT"
] | null | null | null | import logging
from twisted.web import server, guard, resource
from twisted.cred import portal
from lbrynet import conf
from .auth import PasswordChecker, HttpPasswordRealm
from ..auth.keyring import Keyring
log = logging.getLogger(__name__)
| 38.568627 | 107 | 0.688866 | import logging
from twisted.web import server, guard, resource
from twisted.cred import portal
from lbrynet import conf
from .auth import PasswordChecker, HttpPasswordRealm
from ..auth.keyring import Keyring
log = logging.getLogger(__name__)
class HTTPJSONRPCFactory(server.Site):
    """Plain-HTTP twisted Site for the JSON-RPC API. ``keyring`` is accepted
    only for interface parity with HTTPSJSONRPCFactory and is unused here."""
    def __init__(self, resource, keyring, requestFactory=None, *args, **kwargs):
        super().__init__(resource, requestFactory=requestFactory, *args, **kwargs)
        self.use_ssl = False
class HTTPSJSONRPCFactory(server.Site):
    """HTTPS twisted Site for the JSON-RPC API: TLS options come from the
    keyring's private certificate."""
    def __init__(self, resource, keyring, requestFactory=None, *args, **kwargs):
        super().__init__(resource, requestFactory=requestFactory, *args, **kwargs)
        self.options = keyring.private_certificate.options()
        self.use_ssl = True
class AuthJSONRPCResource(resource.Resource):
    """Twisted web resource that routes '' and the configured API address to
    the JSON-RPC protocol, and builds the matching server factory."""
    def __init__(self, protocol):
        resource.Resource.__init__(self)
        # Serve the protocol both at the site root and at the API address.
        self.putChild(b"", protocol)
        self.putChild(conf.settings['API_ADDRESS'].encode(), protocol)

    def getChild(self, name, request):
        # Disable caching on every response served under this resource.
        request.setHeader('cache-control', 'no-cache, no-store, must-revalidate')
        request.setHeader('expires', '0')
        return self if name == '' else resource.Resource.getChild(self, name, request)

    def getServerFactory(self, keyring: Keyring, use_authentication: bool, use_https: bool) -> server.Site:
        """Build the Site: HTTPS vs HTTP per ``use_https``; when
        ``use_authentication`` is set, wrap this resource in HTTP basic auth
        backed by the keyring's password checker."""
        factory_class = HTTPSJSONRPCFactory if use_https else HTTPJSONRPCFactory
        if use_authentication:
            log.info("Using authenticated API")
            checker = PasswordChecker(keyring)
            realm = HttpPasswordRealm(self)
            portal_to_realm = portal.Portal(realm, [checker, ])
            root = guard.HTTPAuthSessionWrapper(
                portal_to_realm, [guard.BasicCredentialFactory('Login to lbrynet api'), ]
            )
        else:
            log.info("Using non-authenticated API")
            root = self
        return factory_class(root, keyring)
| 1,459 | 59 | 201 |
f9abc59e0046c917fae8331d828bf949fa2bf3b8 | 11,840 | py | Python | http_async_client/base.py | jossefaz/async-http-client | 74fe7540d1781d64d41161350019bc5ccfec6426 | [
"MIT"
] | 1 | 2021-12-05T21:01:59.000Z | 2021-12-05T21:01:59.000Z | http_async_client/base.py | jossefaz/http-async-client | 74fe7540d1781d64d41161350019bc5ccfec6426 | [
"MIT"
] | null | null | null | http_async_client/base.py | jossefaz/http-async-client | 74fe7540d1781d64d41161350019bc5ccfec6426 | [
"MIT"
] | null | null | null | from functools import partial
from typing import Union, Dict, Optional
from http_async_client.enums import SupportedProtocols, Methods
import httpx
import re
from dataclasses import dataclass
from httpx._types import RequestContent, URLTypes, RequestData, RequestFiles, QueryParamTypes, HeaderTypes, CookieTypes
from nanoid import generate
import base64
import threading
from httpx import Request
class EndPointRegistry(type):
    """This Class is a singleton that inherits from the `type` class, in order to provide it as a metaclass to other classes.

    It keeps a registry of every endpoint (host/port/protocol) the managed
    class has been called with: a new domain gets a registry entry, a known
    one is simply re-selected as the current domain. This lets a single
    client instance talk to several third-party APIs (e.g. sharing one
    request ID across domains).

    NOTE(review): this filtered copy does not show the metaclass ``__init__``
    that initializes ``__instance``, ``_locker``, ``endpoints_registry`` and
    ``current`` — confirm against the full class definition.
    """
    def __call__(cls, *args, **kwargs):
        """
        Create the singleton instance once (double-checked locking via the
        thread lock), then (re-)register the endpoint from the call kwargs.

        Arguments:
            host: string, the domain's host
            port: int : Optional
            protocol : string, must be a member of the SupportedProtocol Enum
        Returns:
            cls.__instance : the singleton instance of the managed class
        """
        if cls.__instance is None:
            with cls._locker:
                if cls.__instance is None:
                    cls.__instance = super().__call__(*args, **kwargs)
        # On each call : add to registry (if it is already in the reg, it wont be added but only defined as current)
        cls.add_to_reg(**kwargs)
        return cls.__instance
    def add_to_reg(cls, **kwargs):
        """Create an EndPoint from the kwargs and register it under the
        base64 of its base URL (so the same host on two different ports gets
        two distinct entries); always select it as the current endpoint.

        Arguments:
            host: string, the domain's host
            port: int : Optional
            protocol : string, must be a member of the SupportedProtocol Enum
        """
        port = kwargs.get("port", None)
        protocol = kwargs.get("protocol", None)
        host = kwargs.get("host", None)
        end_point = EndPoint(host, port, protocol)
        # base_url is False when host is missing.
        if not end_point.base_url:
            raise ValueError("EndPointRegistry error trying to add new client : host is missing")
        try:
            end_point_key = base64.b64encode(bytes(end_point.base_url, encoding='utf-8'))
            if end_point_key not in cls.endpoints_registry:
                cls.endpoints_registry[end_point_key] = end_point
            cls.current = end_point_key
        except TypeError as te:
            raise TypeError(f"Cannot encode base url to registry : {str(te)}")
@dataclass
async_client_factory = BaseRESTAsyncClient.get_instance
| 40.409556 | 157 | 0.601098 | from functools import partial
from typing import Union, Dict, Optional
from http_async_client.enums import SupportedProtocols, Methods
import httpx
import re
from dataclasses import dataclass
from httpx._types import RequestContent, URLTypes, RequestData, RequestFiles, QueryParamTypes, HeaderTypes, CookieTypes
from nanoid import generate
import base64
import threading
from httpx import Request
class EndPointRegistry(type):
    """This Class is a singleton that inherits from the `type` class, in order to provide it as a metaclass to other classes.

    It keeps a registry of every endpoint (host/port/protocol) the managed
    class has been called with: a new domain gets a registry entry, a known
    one is simply re-selected as the current domain. This lets a single
    client instance talk to several third-party APIs (e.g. sharing one
    request ID across domains).
    """
    def __init__(cls, *args, **kwargs):
        # NOTE: the double underscore is name-mangled to
        # ``_EndPointRegistry__instance``; one slot per managed class.
        cls.__instance = None
        cls._locker = threading.Lock()
        # base64(base_url) -> EndPoint; ``current`` is the selected key.
        cls.endpoints_registry: Dict[bytes, EndPoint] = {}
        cls.current = bytes()
        super().__init__(*args, **kwargs)
    def __call__(cls, *args, **kwargs):
        """
        Create the singleton instance once (double-checked locking via the
        thread lock) so that only one instance is ever built, then
        (re-)register the endpoint from the call kwargs.

        Arguments:
            host: string, the domain's host
            port: int : Optional
            protocol : string, must be a member of the SupportedProtocol Enum
        Returns:
            cls.__instance : the singleton instance of the managed class
        """
        if cls.__instance is None:
            with cls._locker:
                if cls.__instance is None:
                    cls.__instance = super().__call__(*args, **kwargs)
        # On each call : add to registry (if it is already in the reg, it wont be added but only defined as current)
        cls.add_to_reg(**kwargs)
        return cls.__instance
    def add_to_reg(cls, **kwargs):
        """Create an EndPoint from the kwargs and register it under the
        base64 of its base URL (so the same host on two different ports gets
        two distinct entries); always select it as the current endpoint.

        Arguments:
            host: string, the domain's host
            port: int : Optional
            protocol : string, must be a member of the SupportedProtocol Enum
        """
        port = kwargs.get("port", None)
        protocol = kwargs.get("protocol", None)
        host = kwargs.get("host", None)
        end_point = EndPoint(host, port, protocol)
        # base_url is False when host is missing.
        if not end_point.base_url:
            raise ValueError("EndPointRegistry error trying to add new client : host is missing")
        try:
            end_point_key = base64.b64encode(bytes(end_point.base_url, encoding='utf-8'))
            if end_point_key not in cls.endpoints_registry:
                cls.endpoints_registry[end_point_key] = end_point
            cls.current = end_point_key
        except TypeError as te:
            raise TypeError(f"Cannot encode base url to registry : {str(te)}")
@dataclass
class EndPoint:
    """Record describing one remote endpoint: host, optional port and protocol name."""
    host: str
    port: int
    _protocol: str

    @property
    def base_url(self) -> Union[bool, str]:
        """Build "{protocol}://{host}[:{port}]".

        Only host is mandatory; the port segment is skipped when falsy.
        Returns False when the host is missing.
        """
        if not self.host:
            return False
        root = f"{self.protocol.value}://{self.host}"
        if self.port:
            return f"{root}:{self.port}"
        return root

    @property
    def protocol(self) -> SupportedProtocols:
        """Resolve the configured protocol name, falling back to plain HTTP
        when the name is not a SupportedProtocols member."""
        members = SupportedProtocols.__members__
        if self._protocol in members:
            return SupportedProtocols[self._protocol]
        return SupportedProtocols.http
class BaseRESTAsyncClient(metaclass=EndPointRegistry):
    def __init__(self, *, host, port=None, protocol=None):
        # host/port/protocol are consumed by the EndPointRegistry metaclass
        # (__call__/add_to_reg); only per-request state lives on the instance.
        self._request_id = None
    @classmethod
    def get_instance(cls, *, host: str, port: Optional[int] = None,
                     protocol: Optional[str] = None) -> "partial[BaseRESTAsyncClient]":
        """Will return a factory (as a partial function) in order to always ensure the current endpoint is selected in the endpoints registry.

        Arguments:
            host: domain's host
            port: listening port
            protocol: Network Protocol (must be a value of the SupportedProtocols Enum)
        Returns:
            partial function (BaseRESTAsyncClient factory); calling it selects
            this endpoint as current in the registry.
        Example:
            ```python
            client = BaseRESTAsyncClient.get_instance(host="example.com", port=8080, protocol="https")
            ```
        """
        return partial(BaseRESTAsyncClient, host=host, port=port, protocol=protocol)
    @property
    def request_id(self) -> str:
        """Getter for the request id.

        Returns:
            str: the current request's nanoid, or None when no id has been
            generated yet.
        """
        if not self._request_id:
            return None
        return str(self._request_id)
@request_id.setter
def request_id(self, req_id):
"""Setter for the request id
Arguments:
req_id : UID (nanoid) of the request
Todo:
* Check if there is any pre existing request ID from the incoming request headers and generate one ONLY IF there is no
"""
self._request_id = generate()
def get_base_url(self) -> str:
return self.endpoints_registry[self.current].base_url
def make_url(self, url: str = ""):
"""Url builder based on the host base url
Arguments:
url: relative url that will be concatenate wil the host base url
Returns:
string: An absolute url including the protocol, the host base url, port (if any) and the relative url if any
"""
# Ensure to remove keep only one "/" along all the url
url = re.sub('/+', '/', url)
# remove the first "/" at the beginning
url = re.sub('^/', '', url)
return f"{self.get_base_url()}/{url}"
async def _send_request(self, req: Request):
"""
Arguments:
req: a Request ([httpx](https://www.python-httpx.org/api/#request) type)
Returns:
coroutine: handle the HTTP response by awaiting it
"""
async with httpx.AsyncClient() as client:
return await client.send(req)
async def get(self,
url: URLTypes = "",
*,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None):
"""Prepare an HTTP `GET` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
params: Query string
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
Returns:
coroutine : result of the `_send_request` method. It need to be awaited in order to get the HTTP response
"""
request = Request(Methods.get.value, self.make_url(url), params=params, headers=headers, cookies=cookies)
return await self._send_request(request)
async def post(self,
url: URLTypes = "",
*,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
content: RequestContent = None,
data: RequestData = None,
files: RequestFiles = None):
"""Prepare an HTTP `POST` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
data: JSON, Files, Form,
content: All contents that are NOT one of : Form encoded, Multipart files, JSON. Could be use for text or binaries
files: Blob stream
Returns:
coroutine : result of the `_send_request` method. It need to be awaited in order to get the HTTP response
"""
request = Request(Methods.post.value, self.make_url(url),
content=content,
data=data,
files=files,
headers=headers,
cookies=cookies)
return await self._send_request(request)
async def put(self,
url: URLTypes = "",
*,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
data: RequestData = None):
"""Prepare an HTTP `PUT` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
data: JSON, Files, Form,
Returns:
coroutine : result of the `_send_request` method. It need to be awaited in order to get the HTTP response
"""
request = Request(Methods.put.value, self.make_url(url),
data=data,
headers=headers,
cookies=cookies)
return await self._send_request(request)
async def patch(self,
url: URLTypes = "",
*,
headers: HeaderTypes = None,
cookies: CookieTypes = None,
data: RequestData = None):
"""Prepare an HTTP `PATCH` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
data: JSON, Files, Form,
Returns:
coroutine : result of the `_send_request` method. It need to be awaited in order to get the HTTP response
"""
request = Request(Methods.patch.value, self.make_url(url),
data=data,
headers=headers,
cookies=cookies)
return await self._send_request(request)
async def delete(self,
url: URLTypes = "",
*,
params: QueryParamTypes = None,
headers: HeaderTypes = None,
cookies: CookieTypes = None):
"""Prepare an HTTP `DELETE` request and send it asynchronously
Arguments:
url: Relative URL (from the base URL)
params: Query string
headers: HTTP Headers (Key Value)
cookies: HTTP Cookies
Returns:
coroutine : result of the `_send_request` method. It need to be awaited in order to get the HTTP response
"""
request = Request(Methods.delete.value, self.make_url(url), params=params, headers=headers, cookies=cookies)
return await self._send_request(request)
def __call__(self, *args, **kwargs):
"""
Will trow an error that avoid BaseRESTAsyncClient to be called directly and force use the get_instance class method
"""
raise TypeError("BaseClient cannot be called directly use get_instance class method instead")
# Module-level convenience alias: the canonical way to obtain a configured
# client factory (see BaseRESTAsyncClient.get_instance).
async_client_factory = BaseRESTAsyncClient.get_instance
| 350 | 8,208 | 72 |
da8499f0797074b5c7fabba9eb9bbeb7b0b66b09 | 728 | py | Python | examples/textArea.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 666 | 2016-11-14T18:17:40.000Z | 2022-03-29T03:53:22.000Z | examples/textArea.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 598 | 2016-10-20T21:04:09.000Z | 2022-03-15T22:44:49.000Z | examples/textArea.py | tgolsson/appJar | 5e2f8bff44e927e7c2bae17fccddc6dbf79952f0 | [
"Apache-2.0"
] | 95 | 2017-01-19T12:23:58.000Z | 2022-03-06T18:16:21.000Z | import sys
sys.path.append("../")
from appJar import gui
# NOTE(review): this copy of the example references ``press`` and ``log``
# handlers that are not defined anywhere above — as written, the
# setTextAreaChangeFunction/addButtons lines below raise NameError at import
# time. The handler definitions were presumably stripped from this copy;
# confirm against the original example.
app=gui()
app.addScrolledTextArea("t1")
app.setTextAreaChangeFunction("t1", press)
app.addButtons(["CLEAR", "SET"], press)
app.addButtons(["LOG", "CHECK"], log)
app.addCheckBox("CALL")
app.addCheckBox("END")
app.addEntry("text")
app.go()
| 23.483871 | 117 | 0.637363 | import sys
sys.path.append("../")
from appJar import gui
def press(btn=None):
    """Handle the CLEAR/SET buttons and text-area change events.

    CLEAR empties the text area, SET fills it from the "text" entry; any
    other trigger (e.g. a change event) is just printed.
    """
    if btn not in ("CLEAR", "SET"):
        print("changed:", btn)
        return
    fire_callback = app.getCheckBox("CALL")
    if btn == "CLEAR":
        app.clearTextArea("t1", callFunction=fire_callback)
    else:
        app.setTextArea("t1", app.getEntry("text"), callFunction=fire_callback, end=app.getCheckBox("END"))
def log(btn):
    """Handle the LOG/CHECK buttons: dump the text area or print its
    changed-flag."""
    if btn == "CHECK":
        print(app.textAreaChanged("t1"))
    elif btn == "LOG":
        app.logTextArea("t1")
# Build the demo UI: a scrolled text area whose change events are routed to
# ``press``, two rows of buttons wired to the handlers above, two checkboxes
# read by ``press`` (CALL -> callFunction=, END -> end=), and an entry widget
# whose content SET copies into the text area.
app=gui()
app.addScrolledTextArea("t1")
app.setTextAreaChangeFunction("t1", press)
app.addButtons(["CLEAR", "SET"], press)
app.addButtons(["LOG", "CHECK"], log)
app.addCheckBox("CALL")
app.addCheckBox("END")
app.addEntry("text")
app.go()
| 383 | 0 | 46 |
588b761156ea659c9c346c22dd0ae03fb49cf417 | 12,480 | py | Python | dp_multiq/tree.py | michaeljneely/google-research | 8028769662d23426be6d316e4ab954e8c85cf8fe | [
"Apache-2.0"
] | 1 | 2021-08-15T20:03:51.000Z | 2021-08-15T20:03:51.000Z | dp_multiq/tree.py | michaeljneely/google-research | 8028769662d23426be6d316e4ab954e8c85cf8fe | [
"Apache-2.0"
] | 12 | 2021-08-25T16:15:31.000Z | 2022-02-10T05:10:37.000Z | dp_multiq/tree.py | michaeljneely/google-research | 8028769662d23426be6d316e4ab954e8c85cf8fe | [
"Apache-2.0"
] | 1 | 2021-05-09T07:26:31.000Z | 2021-05-09T07:26:31.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tree method for computing multiple DP quantiles.
Code is modeled after the quantile trees implementation in this Java library:
https://github.com/google/differential-privacy/blob/main/java/main/com/google/privacy/differentialprivacy/BoundedQuantiles.java
The method is essentially using range trees to answer rank queries, as in the
mechanism presented in Section 7.2 of "Private and Continual Release of
Statistics" by Chan et al.: https://eprint.iacr.org/2010/076.pdf
"""
import collections
import enum
import numpy as np
# Smallest value difference that is considered significant.
_NUMERICAL_TOLERANCE = 1e-6

# Index of the root of the tree.
_ROOT_INDEX = 0

# Heuristic for filtering out empty nodes. Suppose that the total sum of a
# node's noisy value and all of its siblings' noisy values is t. Then, if the
# node's value is less than _ALPHA * t, it will be discarded, and a new sum t'
# will be computed excluding it. Setting _ALPHA to zero implies no filtering.
_ALPHA = 0.005


class PrivateQuantileTree:
  """Tree structure for computing DP quantiles."""

  class NoiseType(enum.Enum):
    """Noise distributions supported by the tree (selected in __init__)."""
    # This enum is referenced as PrivateQuantileTree.NoiseType by __init__
    # and by the module-level tree() helper; it must exist on the class.
    LAPLACE = 1
    GAUSSIAN = 2

  def __init__(self,
               noise_type,
               epsilon,
               delta,
               data_low,
               data_high,
               swap,
               tree_height=4,
               branching_factor=16):
    """Initializes an empty tree and creates a noise generator.

    Leaf nodes of the tree can be thought of as bins that uniformly partition
    the [data_low, data_high] range.

    Args:
      noise_type: Specifies a value from the NoiseType enum.
      epsilon: Differential privacy parameter epsilon.
      delta: Differential privacy parameter delta.
      data_low: Smallest possible value for data points; any data points with
        smaller values will be clamped at data_low.
      data_high: Largest possible value for data points; any data points with
        larger values will be clamped at data_high.
      swap: If true, uses swap dp sensitivity, otherwise uses add-remove.
      tree_height: Depth of the tree structure. Must be greater than or equal
        to one; height zero corresponds to a tree that is just a single node.
      branching_factor: Number of children of each internal tree node. Must be
        at least two.

    Throws: ValueError if any input arg does not conform to the above
      specifications.
    """
    if data_low >= data_high:
      raise ValueError("Invalid data bounds [{}, {}]; data_low must be smaller "
                       "than data_high.".format(data_low, data_high))
    self._data_low = data_low
    self._data_high = data_high
    if tree_height < 1:
      raise ValueError(
          "Invalid value of {} for tree_height input; height must be at least"
          " 1.".format(tree_height))
    self._tree_height = tree_height
    if branching_factor < 2:
      raise ValueError("Invalid value of {} for branching_factor input; factor "
                       "must be at least 2.".format(branching_factor))
    self._branching_factor = branching_factor
    self._tree = collections.Counter()
    self._noised_tree = {}
    self._num_leaves = branching_factor**tree_height
    # Geometric series: node count of a complete tree of this height.
    num_nodes = ((branching_factor**(tree_height + 1)) - 1) / (
        branching_factor - 1)
    self._leftmost_leaf_index = (int)(num_nodes - self._num_leaves)
    self._range = self._data_high - self._data_low
    self._finalized = False
    # Create noise generator function.
    # For sensitivity computations: We assume each user contributes one data
    # point, which means that each user contributes a count of one to one node
    # in each level of the tree. L1 and L2 sensitivity are thus identical.
    scaling = 2 if swap else 1
    sensitivity = scaling * self._tree_height
    if noise_type == PrivateQuantileTree.NoiseType.LAPLACE:
      scale = sensitivity / epsilon
      self._gen_noise = lambda: np.random.laplace(loc=0.0, scale=scale)
    elif noise_type == PrivateQuantileTree.NoiseType.GAUSSIAN:
      stdev = np.sqrt(2 * sensitivity * np.log(1.32 / delta)) / epsilon
      self._gen_noise = lambda: np.random.normal(loc=0.0, scale=stdev)
    else:
      raise ValueError(
          "Invalid value of {} for noise_type input.".format(noise_type))

  def get_leaf_indices(self, values):
    """Returns the indices of the leaf node bins into which the values fall.

    Leaf nodes uniformly partition the [data_low, data_high] range.

    Args:
      values: Array of values, assumed to lie in [data_low, data_high].
    """
    range_fracs = (values - self._data_low) / self._range
    leaf_indices = np.trunc(range_fracs * self._num_leaves)
    # data_high itself belongs in the last bin, not one past it.
    high_values = values == self._data_high
    leaf_indices[high_values] -= 1
    return self._leftmost_leaf_index + leaf_indices

  def get_parents(self, child_indices):
    """Returns the indices of the parents of the child_indices nodes.

    Args:
      child_indices: Array of child indices.
    """
    return np.trunc((child_indices - 1) / self._branching_factor)

  def add_data(self, data):
    """Inserts data into the tree.

    Args:
      data: Array of data points.

    Raises:
      RuntimeError: If this method is called after tree is finalized.
    """
    if self._finalized:
      raise RuntimeError("Cannot add data once tree is finalized.")
    if data.size == 0:
      return
    clipped_data = np.clip(data, self._data_low, self._data_high)
    # Increment counts at leaf nodes and then iterate upwards, incrementing
    # counts at all ancestors on the path to the root (but not the root itself).
    indices = self.get_leaf_indices(clipped_data)
    indices, counts = np.unique(indices, return_counts=True)
    index_count_map = dict(zip(indices, counts))
    while indices[0] != _ROOT_INDEX:
      self._tree.update(index_count_map)
      new_indices = self.get_parents(indices)
      new_index_count_map = collections.Counter()
      for i in range(len(indices)):
        new_index_count_map[new_indices[i]] += index_count_map[indices[i]]
      indices = np.unique(new_indices)
      index_count_map = new_index_count_map
    return

  def finalize(self):
    """Disables calling add_data, and enables calling compute_quantile."""
    self._finalized = True
    return

  def get_leftmost_child(self, parent_index):
    """Returns the leftmost (lowest-numbered) child of the parent_index node.

    Args:
      parent_index: Index of the parent node.
    """
    return parent_index * self._branching_factor + 1

  def get_rightmost_child(self, parent_index):
    """Returns the rightmost (highest-numbered) child of the parent_index node.

    Args:
      parent_index: Index of the parent node.
    """
    return (parent_index + 1) * self._branching_factor

  def get_left_value(self, index):
    """Returns the minimum value that is mapped to index's subtree.

    Args:
      index: Index of a node in the tree.
    """
    # Find the smallest-index leaf node in this subtree.
    while index < self._leftmost_leaf_index:
      index = self.get_leftmost_child(index)
    return self._data_low + self._range * (
        index - self._leftmost_leaf_index) / self._num_leaves

  def get_right_value(self, index):
    """Returns the maximum value that is mapped to index's subtree.

    Args:
      index: Index of a node in the tree.
    """
    # Find the largest-index leaf node in this subtree.
    while index < self._leftmost_leaf_index:
      index = self.get_rightmost_child(index)
    return self._data_low + self._range * (index - self._leftmost_leaf_index +
                                           1) / self._num_leaves

  def get_noised_count(self, index):
    """Returns a noised version of the count for the given index.

    Note that if the count has previously been noised, the same value as before
    is returned.

    Args:
      index: Index of a node in the tree.
    """
    if index in self._noised_tree:
      return self._noised_tree[index]
    noised_count = self._tree[index] + self._gen_noise()
    self._noised_tree[index] = noised_count
    return noised_count

  def compute_quantile(self, quantile):
    """Returns a differentially private estimate of the quantile.

    Args:
      quantile: A value in [0, 1].
    """
    # Ensure no data can be added once a quantile has been computed.
    self.finalize()
    if quantile < 0.0 or quantile > 1.0:
      raise ValueError(
          "Quantile must be in [0, 1]; requested quantile {}.".format(quantile))
    # Find the (approximate) index of the leaf node containing the quantile.
    index = _ROOT_INDEX
    while index < self._leftmost_leaf_index:
      leftmost_child_index = self.get_leftmost_child(index)
      rightmost_child_index = self.get_rightmost_child(index)
      # Sum all child nodes' noisy counts.
      noised_counts = np.asarray([
          self.get_noised_count(i)
          for i in range(leftmost_child_index, rightmost_child_index + 1)
      ])
      total = np.sum(noised_counts)
      # If all child nodes are "empty", return rank value of current subtree.
      if total <= 0.0:
        break
      # Sum again, but only noisy counts exceeding min_value_cutoff.
      min_value_cutoff = total * _ALPHA
      passes_cutoff = noised_counts >= min_value_cutoff
      filtered_counts = noised_counts[passes_cutoff]
      adjusted_total = np.sum(filtered_counts)
      if adjusted_total == 0.0:
        break
      # Find the child whose subtree contains the quantile.
      partial_count = 0.0
      for i in range(self._branching_factor):
        # Skip nodes whose contributions are too small.
        if passes_cutoff[i]:
          ith_count = noised_counts[i]
          partial_count += ith_count
          # Break if the current child's subtree contains the quantile.
          if partial_count / adjusted_total >= quantile - _NUMERICAL_TOLERANCE:
            quantile = (adjusted_total * quantile -
                        (partial_count - ith_count)) / ith_count
            # Truncate at 1; calculated quantile may be larger than 1, due to
            # the subtraction of the numerical tolerance value above.
            quantile = min(quantile, 1.0)
            index = i + leftmost_child_index
            break
    # Linearly interpolate between the min and max values associated with the
    # node of the current index.
    return (1 - quantile) * self.get_left_value(
        index) + quantile * self.get_right_value(index)
def tree(sampled_data,
         data_low,
         data_high,
         qs,
         eps,
         delta,
         swap,
         tree_height=4,
         branching_factor=16):
  """Computes (eps, delta)-differentially private quantile estimates for qs.

  Builds a PrivateQuantileTree over the sampled data, using Laplace noise
  when delta is zero and Gaussian noise otherwise, then queries it once per
  requested quantile.

  Args:
    sampled_data: Array of data points.
    data_low: Lower bound for data.
    data_high: Upper bound for data.
    qs: Increasing array of quantiles in (0,1).
    eps: Privacy parameter epsilon.
    delta: Privacy parameter delta.
    swap: If true, uses swap dp sensitivity, otherwise uses add-remove.
    tree_height: Height for PrivateQuantileTree.
    branching_factor: Branching factor for PrivateQuantileTree.

  Returns:
    Array o where o[i] is the quantile estimate corresponding to quantile q[i].
  """
  if delta == 0:
    noise_kind = PrivateQuantileTree.NoiseType.LAPLACE
  else:
    noise_kind = PrivateQuantileTree.NoiseType.GAUSSIAN
  quantile_tree = PrivateQuantileTree(
      noise_type=noise_kind,
      epsilon=eps,
      delta=delta,
      data_low=data_low,
      data_high=data_high,
      swap=swap,
      tree_height=tree_height,
      branching_factor=branching_factor)
  quantile_tree.add_data(sampled_data)
  estimates = np.empty(len(qs))
  for pos, q in enumerate(qs):
    estimates[pos] = quantile_tree.compute_quantile(q)
  return estimates
| 35.965418 | 127 | 0.685817 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tree method for computing multiple DP quantiles.
Code is modeled after the quantile trees implementation in this Java library:
https://github.com/google/differential-privacy/blob/main/java/main/com/google/privacy/differentialprivacy/BoundedQuantiles.java
The method is essentially using range trees to answer rank queries, as in the
mechanism presented in Section 7.2 of "Private and Continual Release of
Statistics" by Chan et al.: https://eprint.iacr.org/2010/076.pdf
"""
import collections
import enum
import numpy as np
# Smallest value difference that is considered significant.
_NUMERICAL_TOLERANCE = 1e-6
# Index of the root of the tree.
_ROOT_INDEX = 0
# Heuristic for filtering out empty nodes. Suppose that the total sum of a
# node's noisy value and all of its siblings' noisy values is t. Then, if the
# node's value is less than _ALPHA * t, it will be discarded, and a new sum t'
# will be computed excluding it. Setting _ALPHA to zero implies no filtering.
_ALPHA = 0.005
class PrivateQuantileTree:
  """Tree structure for computing DP quantiles."""

  # Noise distributions selectable at construction time; also referenced by
  # the module-level tree() helper below.
  class NoiseType(enum.Enum):
    LAPLACE = 1
    GAUSSIAN = 2

  def __init__(self,
               noise_type,
               epsilon,
               delta,
               data_low,
               data_high,
               swap,
               tree_height=4,
               branching_factor=16):
    """Initializes an empty tree and creates a noise generator.

    Leaf nodes of the tree can be thought of as bins that uniformly partition
    the [data_low, data_high] range.

    Args:
      noise_type: Specifies a value from the NoiseType enum.
      epsilon: Differential privacy parameter epsilon.
      delta: Differential privacy parameter delta.
      data_low: Smallest possible value for data points; any data points with
        smaller values will be clamped at data_low.
      data_high: Largest possible value for data points; any data points with
        larger values will be clamped at data_high.
      swap: If true, uses swap dp sensitivity, otherwise uses add-remove.
      tree_height: Depth of the tree structure. Must be greater than or equal
        to one; height zero corresponds to a tree that is just a single node.
      branching_factor: Number of children of each internal tree node. Must be
        at least two.

    Throws: ValueError if any input arg does not conform to the above
      specifications.
    """
    if data_low >= data_high:
      raise ValueError("Invalid data bounds [{}, {}]; data_low must be smaller "
                       "than data_high.".format(data_low, data_high))
    self._data_low = data_low
    self._data_high = data_high
    if tree_height < 1:
      raise ValueError(
          "Invalid value of {} for tree_height input; height must be at least"
          " 1.".format(tree_height))
    self._tree_height = tree_height
    if branching_factor < 2:
      raise ValueError("Invalid value of {} for branching_factor input; factor "
                       "must be at least 2.".format(branching_factor))
    self._branching_factor = branching_factor
    self._tree = collections.Counter()
    self._noised_tree = {}
    self._num_leaves = branching_factor**tree_height
    # Geometric series: node count of a complete tree of this height.
    num_nodes = ((branching_factor**(tree_height + 1)) - 1) / (
        branching_factor - 1)
    self._leftmost_leaf_index = (int)(num_nodes - self._num_leaves)
    self._range = self._data_high - self._data_low
    self._finalized = False
    # Create noise generator function.
    # For sensitivity computations: We assume each user contributes one data
    # point, which means that each user contributes a count of one to one node
    # in each level of the tree. L1 and L2 sensitivity are thus identical.
    scaling = 2 if swap else 1
    sensitivity = scaling * self._tree_height
    if noise_type == PrivateQuantileTree.NoiseType.LAPLACE:
      scale = sensitivity / epsilon
      self._gen_noise = lambda: np.random.laplace(loc=0.0, scale=scale)
    elif noise_type == PrivateQuantileTree.NoiseType.GAUSSIAN:
      stdev = np.sqrt(2 * sensitivity * np.log(1.32 / delta)) / epsilon
      self._gen_noise = lambda: np.random.normal(loc=0.0, scale=stdev)
    else:
      raise ValueError(
          "Invalid value of {} for noise_type input.".format(noise_type))

  def get_leaf_indices(self, values):
    """Returns the indices of the leaf node bins into which the values fall.

    Leaf nodes uniformly partition the [data_low, data_high] range.

    Args:
      values: Array of values, assumed to lie in [data_low, data_high].
    """
    range_fracs = (values - self._data_low) / self._range
    leaf_indices = np.trunc(range_fracs * self._num_leaves)
    # data_high itself belongs in the last bin, not one past it.
    high_values = values == self._data_high
    leaf_indices[high_values] -= 1
    return self._leftmost_leaf_index + leaf_indices

  def get_parents(self, child_indices):
    """Returns the indices of the parents of the child_indices nodes.

    Args:
      child_indices: Array of child indices.
    """
    return np.trunc((child_indices - 1) / self._branching_factor)

  def add_data(self, data):
    """Inserts data into the tree.

    Args:
      data: Array of data points.

    Raises:
      RuntimeError: If this method is called after tree is finalized.
    """
    if self._finalized:
      raise RuntimeError("Cannot add data once tree is finalized.")
    if data.size == 0:
      return
    clipped_data = np.clip(data, self._data_low, self._data_high)
    # Increment counts at leaf nodes and then iterate upwards, incrementing
    # counts at all ancestors on the path to the root (but not the root itself).
    indices = self.get_leaf_indices(clipped_data)
    indices, counts = np.unique(indices, return_counts=True)
    index_count_map = dict(zip(indices, counts))
    while indices[0] != _ROOT_INDEX:
      self._tree.update(index_count_map)
      new_indices = self.get_parents(indices)
      new_index_count_map = collections.Counter()
      for i in range(len(indices)):
        new_index_count_map[new_indices[i]] += index_count_map[indices[i]]
      indices = np.unique(new_indices)
      index_count_map = new_index_count_map
    return

  def finalize(self):
    """Disables calling add_data, and enables calling compute_quantile."""
    self._finalized = True
    return

  def get_leftmost_child(self, parent_index):
    """Returns the leftmost (lowest-numbered) child of the parent_index node.

    Args:
      parent_index: Index of the parent node.
    """
    return parent_index * self._branching_factor + 1

  def get_rightmost_child(self, parent_index):
    """Returns the rightmost (highest-numbered) child of the parent_index node.

    Args:
      parent_index: Index of the parent node.
    """
    return (parent_index + 1) * self._branching_factor

  def get_left_value(self, index):
    """Returns the minimum value that is mapped to index's subtree.

    Args:
      index: Index of a node in the tree.
    """
    # Find the smallest-index leaf node in this subtree.
    while index < self._leftmost_leaf_index:
      index = self.get_leftmost_child(index)
    return self._data_low + self._range * (
        index - self._leftmost_leaf_index) / self._num_leaves

  def get_right_value(self, index):
    """Returns the maximum value that is mapped to index's subtree.

    Args:
      index: Index of a node in the tree.
    """
    # Find the largest-index leaf node in this subtree.
    while index < self._leftmost_leaf_index:
      index = self.get_rightmost_child(index)
    return self._data_low + self._range * (index - self._leftmost_leaf_index +
                                           1) / self._num_leaves

  def get_noised_count(self, index):
    """Returns a noised version of the count for the given index.

    Note that if the count has previously been noised, the same value as before
    is returned.

    Args:
      index: Index of a node in the tree.
    """
    # Memoize so repeated queries observe one consistent noisy value per node.
    if index in self._noised_tree:
      return self._noised_tree[index]
    noised_count = self._tree[index] + self._gen_noise()
    self._noised_tree[index] = noised_count
    return noised_count

  def compute_quantile(self, quantile):
    """Returns a differentially private estimate of the quantile.

    Args:
      quantile: A value in [0, 1].
    """
    # Ensure no data can be added once a quantile has been computed.
    self.finalize()
    if quantile < 0.0 or quantile > 1.0:
      raise ValueError(
          "Quantile must be in [0, 1]; requested quantile {}.".format(quantile))
    # Find the (approximate) index of the leaf node containing the quantile.
    index = _ROOT_INDEX
    while index < self._leftmost_leaf_index:
      leftmost_child_index = self.get_leftmost_child(index)
      rightmost_child_index = self.get_rightmost_child(index)
      # Sum all child nodes' noisy counts.
      noised_counts = np.asarray([
          self.get_noised_count(i)
          for i in range(leftmost_child_index, rightmost_child_index + 1)
      ])
      total = np.sum(noised_counts)
      # If all child nodes are "empty", return rank value of current subtree.
      if total <= 0.0:
        break
      # Sum again, but only noisy counts exceeding min_value_cutoff.
      min_value_cutoff = total * _ALPHA
      passes_cutoff = noised_counts >= min_value_cutoff
      filtered_counts = noised_counts[passes_cutoff]
      adjusted_total = np.sum(filtered_counts)
      if adjusted_total == 0.0:
        break
      # Find the child whose subtree contains the quantile.
      partial_count = 0.0
      for i in range(self._branching_factor):
        # Skip nodes whose contributions are too small.
        if passes_cutoff[i]:
          ith_count = noised_counts[i]
          partial_count += ith_count
          # Break if the current child's subtree contains the quantile.
          if partial_count / adjusted_total >= quantile - _NUMERICAL_TOLERANCE:
            quantile = (adjusted_total * quantile -
                        (partial_count - ith_count)) / ith_count
            # Truncate at 1; calculated quantile may be larger than 1, due to
            # the subtraction of the numerical tolerance value above.
            quantile = min(quantile, 1.0)
            index = i + leftmost_child_index
            break
    # Linearly interpolate between the min and max values associated with the
    # node of the current index.
    return (1 - quantile) * self.get_left_value(
        index) + quantile * self.get_right_value(index)
def tree(sampled_data,
         data_low,
         data_high,
         qs,
         eps,
         delta,
         swap,
         tree_height=4,
         branching_factor=16):
  """Computes (eps, delta)-differentially private quantile estimates for qs.

  Builds a PrivateQuantileTree over the sampled data, using Laplace noise
  when delta is zero and Gaussian noise otherwise, then queries it once per
  requested quantile.

  Args:
    sampled_data: Array of data points.
    data_low: Lower bound for data.
    data_high: Upper bound for data.
    qs: Increasing array of quantiles in (0,1).
    eps: Privacy parameter epsilon.
    delta: Privacy parameter delta.
    swap: If true, uses swap dp sensitivity, otherwise uses add-remove.
    tree_height: Height for PrivateQuantileTree.
    branching_factor: Branching factor for PrivateQuantileTree.

  Returns:
    Array o where o[i] is the quantile estimate corresponding to quantile q[i].
  """
  if delta == 0:
    noise_kind = PrivateQuantileTree.NoiseType.LAPLACE
  else:
    noise_kind = PrivateQuantileTree.NoiseType.GAUSSIAN
  quantile_tree = PrivateQuantileTree(
      noise_type=noise_kind,
      epsilon=eps,
      delta=delta,
      data_low=data_low,
      data_high=data_high,
      swap=swap,
      tree_height=tree_height,
      branching_factor=branching_factor)
  quantile_tree.add_data(sampled_data)
  estimates = np.empty(len(qs))
  for pos, q in enumerate(qs):
    estimates[pos] = quantile_tree.compute_quantile(q)
  return estimates
| 0 | 39 | 25 |
81b0da0e4b675061328ec8d1c6ae3f7c6c0db455 | 5,077 | py | Python | kaa/exts/games/_cards.py | haasosaurus/kaa | 476fc93e2c6c9a75c2eaf70f189ef993e877f896 | [
"MIT"
] | 2 | 2021-11-16T01:18:18.000Z | 2021-12-28T06:53:34.000Z | kaa/exts/games/_cards.py | haasosaurus/pythonidae | 476fc93e2c6c9a75c2eaf70f189ef993e877f896 | [
"MIT"
] | null | null | null | kaa/exts/games/_cards.py | haasosaurus/pythonidae | 476fc93e2c6c9a75c2eaf70f189ef993e877f896 | [
"MIT"
] | 5 | 2020-02-23T00:50:30.000Z | 2020-06-24T07:42:45.000Z | # -*- coding: utf-8 -*-
import enum
import random
from typing import Union
class Card:
    """A single playing card (placeholder: no state or behaviour defined in
    this copy)."""
class Deck:
    """A deck of playing cards (only the glyph table is defined in this
    copy)."""

    class Icons:
        """Unicode playing-card glyphs usable by Card/Deck rendering."""

        # single special cards
        card_back = '🂠'
        joker_red = '🂿'
        joker_white = '🃏'
        joker_black = '🃟'
        # 13 glyphs per suit (ace..king), plus the unused "knight" extra
        clubs = '🃑🃒🃓🃔🃕🃖🃗🃘🃙🃚🃛🃝🃞'
        clubs_extra = '🃜'
        diamonds = '🃁🃂🃃🃄🃅🃆🃇🃈🃉🃊🃋🃍🃎'
        diamonds_extra = '🃌'
        hearts = '🂱🂲🂳🂴🂵🂶🂷🂸🂹🂺🂻🂽🂾'
        hearts_extra = '🂼'
        spades = '🂡🂢🂣🂤🂥🂦🂧🂨🂩🂪🂫🂭🂮'
        spades_extra = '🂬'
# NOTE(review): ``tests`` is not defined anywhere in this copy of the module,
# so running it as a script raises NameError here. The driver function was
# presumably stripped from this copy; confirm against the original module.
if __name__ == '__main__':
    tests()
| 22.766816 | 128 | 0.515068 | # -*- coding: utf-8 -*-
import enum
import random
from typing import Union
class Card:
    """A single playing card: a rank, a suit, and an optional game value."""

    class Suit(enum.Enum):
        """Card suits; each value tuple is (long name, short code, emoji)."""
        # Fixed: the emoji glyphs were rotated across suits (DIAMONDS showed
        # the clubs glyph, CLUBS showed hearts, HEARTS showed diamonds).
        SPADES = ('Spades', 'S', '♠️')
        DIAMONDS = ('Diamonds', 'D', '♦️')
        CLUBS = ('Clubs', 'C', '♣️')
        HEARTS = ('Hearts', 'H', '♥️')

        def __repr__(self):
            return f'Card.Suit.{self.name}'

        def __str__(self):
            return self.__repr__()

        def format_long(self):
            """Return the full suit name, e.g. 'Spades'."""
            return self.value[0]

        def format_short(self):
            """Return the one-letter suit code, e.g. 'S'."""
            return self.value[1]

        def format_emoji(self):
            """Return the suit's emoji glyph, e.g. '♠️'."""
            return self.value[2]

    class Rank(enum.Enum):
        """Card ranks; each value tuple is (ordinal, long name, short code)."""
        ACE = (0, 'Ace', 'A')
        TWO = (1, 'Two', '2')
        THREE = (2, 'Three', '3')
        FOUR = (3, 'Four', '4')
        FIVE = (4, 'Five', '5')
        SIX = (5, 'Six', '6')
        SEVEN = (6, 'Seven', '7')
        EIGHT = (7, 'Eight', '8')
        NINE = (8, 'Nine', '9')
        TEN = (9, 'Ten', '10')
        JACK = (10, 'Jack', 'J')
        QUEEN = (11, 'Queen', 'Q')
        KING = (12, 'King', 'K')

        def __repr__(self):
            return f'Card.Rank.{self.name}'

        def __str__(self):
            return self.__repr__()

        def __int__(self):
            # Ordinal position: ACE -> 0 ... KING -> 12.
            return self.value[0]

        def format_long(self):
            """Return the full rank name, e.g. 'Ace'."""
            return self.value[1]

        def format_short(self):
            """Return the short rank code, e.g. 'A' or '10'."""
            return self.value[2]

    def __init__(self, rank: Rank, suit: Suit, value: Union[int, None] = None):
        """Create a card.

        Args:
            rank: a member of Card.Rank
            suit: a member of Card.Suit
            value: optional game-specific point value
        """
        self.rank = rank
        self.suit = suit
        self.value = value

    def __repr__(self):
        return f'Card({self.rank.__repr__()}, {self.suit.__repr__()})'

    def __str__(self):
        return self.__repr__()

    def format_long(self):
        """Return e.g. 'Ace of Spades'."""
        return f'{self.rank.format_long()} of {self.suit.format_long()}'

    def format_short(self):
        """Return e.g. 'AS'."""
        return f'{self.rank.format_short()}{self.suit.format_short()}'

    def format_short_emoji(self):
        """Return e.g. 'A♠️'."""
        return f'{self.rank.format_short()}{self.suit.format_emoji()}'
class Deck:
    """A deck of playing cards, by default the standard 52-card pack."""

    class Icons:
        """Unicode playing-card glyphs usable by Card/Deck rendering."""

        # single special cards
        card_back = '🂠'
        joker_red = '🂿'
        joker_white = '🃏'
        joker_black = '🃟'
        # 13 glyphs per suit (ace..king), plus the unused "knight" extra
        clubs = '🃑🃒🃓🃔🃕🃖🃗🃘🃙🃚🃛🃝🃞'
        clubs_extra = '🃜'
        diamonds = '🃁🃂🃃🃄🃅🃆🃇🃈🃉🃊🃋🃍🃎'
        diamonds_extra = '🃌'
        hearts = '🂱🂲🂳🂴🂵🂶🂷🂸🂹🂺🂻🂽🂾'
        hearts_extra = '🂼'
        spades = '🂡🂢🂣🂤🂥🂦🂧🂨🂩🂪🂫🂭🂮'
        spades_extra = '🂬'

    def __init__(
            self,
            cards = None,
            *,
            jokers: bool = False,
            shuffled: bool = False,
    ):
        """Wrap *cards* if given, otherwise build the standard 52-card deck.

        Spades and diamonds run ace..king, clubs and hearts king..ace.
        ``jokers=True`` is not implemented yet; ``shuffled=True`` shuffles
        the freshly built deck in place.
        """
        if cards is not None:
            self._cards = cards
        else:
            self._cards = []
            for suit in (Card.Suit.SPADES, Card.Suit.DIAMONDS):
                for rank in Card.Rank:
                    self._cards.append(Card(rank, suit))
            for suit in (Card.Suit.CLUBS, Card.Suit.HEARTS):
                for rank in reversed(Card.Rank):
                    self._cards.append(Card(rank, suit))
        if jokers:
            raise NotImplementedError('please implement this parameter')
        if shuffled:
            self.shuffle()

    def __len__(self):
        """Number of cards currently in the deck."""
        return len(self._cards)

    def __getitem__(self, s: Union[int, slice]):
        """Index or slice the underlying card list."""
        return self._cards[s]

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self._cards)

    def deal(self):
        """Remove and return the top (last) card."""
        return self._cards.pop()
def tests():
    """Ad-hoc debugging checks for Card, Deck, Suit and Rank.

    Toggle the debugging_* flags below to choose which sections print.
    """
    def print_header(title, line_length=30):
        # Print `title` centered above/below a rule of `line_length` dashes.
        # (Bug fix: the parameter used to be overwritten with 30 on the first
        # line, which made the line_length argument impossible to use.)
        print('-' * line_length)
        indent = line_length // 2 - len(title) // 2
        print(f"{' ' * indent}{title}\n{'-' * line_length}")
    # debug options
    debugging_card = True
    # debugging_card = False
    debugging_suit = True
    debugging_suit = False
    debugging_rank = True
    debugging_rank = False
    debugging_deck = True
    debugging_deck = False
    # tests - card
    if debugging_card:
        print_header('Card')
        card = Card(Card.Rank.ACE, Card.Suit.SPADES)
        print(f'{card.__repr__()}')
        print(f'{str(card)=}')
        print(f'{card.format_short()=}')
        print()
    # tests - deck
    if debugging_deck:
        print_header('Deck')
        # print debug info
        deck = Deck(shuffled=False)
        for card in deck:
            print(f'  {card.format_long()}')
        print()
        print(f'{len(deck)=}')
        # print(deck.Icons.joker_white)
        # print(deck.Icons.clubs[0])
        print()
    # tests - suit
    if debugging_suit:
        print_header('Suit')
        # print debug info
        card = Card(Card.Rank.ACE, Card.Suit.SPADES)
        print(f'{str(card.suit)=}')
        # for name, member in Card.Suit.__members__.items():
        #     print(f'name: {name}, member: {member}')
        print()
    # tests - rank
    if debugging_rank:
        print_header('Rank')
        # print debug info
        card = Card(Card.Rank.ACE, Card.Suit.SPADES)
        print(f'{int(card.rank)=}')
        print(f'{str(card.rank)=}')
        print()
if __name__ == '__main__':
    # Run the ad-hoc debugging checks when executed as a script.
    tests()
| 3,142 | 906 | 374 |
c48e6a44c4da3a2f27992537d69968cbcb561fe2 | 1,806 | py | Python | nipype/interfaces/freesurfer/__init__.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/freesurfer/__init__.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/interfaces/freesurfer/__init__.py | effigies/nipype | 18fe222557cf3b9627e06b2a66fba589feaca581 | [
"Apache-2.0"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for freesurfer."""
from .base import Info, FSCommand, no_freesurfer
from .preprocess import (ParseDICOMDir, UnpackSDICOMDir, MRIConvert, Resample,
ReconAll, BBRegister, ApplyVolTransform, Smooth,
DICOMConvert, RobustRegister, FitMSParams,
SynthesizeFLASH, MNIBiasCorrection, WatershedSkullStrip,
Normalize, CANormalize, CARegister, CALabel, MRIsCALabel,
SegmentCC, SegmentWM, EditWMwithAseg, ConcatenateLTA)
from .model import (MRISPreproc, MRISPreprocReconAll, GLMFit, OneSampleTTest, Binarize,
Concatenate, SegStats, SegStatsReconAll, Label2Vol, MS_LDA,
Label2Label, Label2Annot, SphericalAverage)
from .utils import (SampleToSurface, SurfaceSmooth, SurfaceTransform, Surface2VolTransform,
SurfaceSnapshots, ApplyMask, MRIsConvert, MRITessellate, MRIPretess,
MRIMarchingCubes, SmoothTessellation, MakeAverageSubject,
ExtractMainComponent, Tkregister2, AddXFormToHeader,
CheckTalairachAlignment, TalairachAVI, TalairachQC, RemoveNeck,
MRIFill, MRIsInflate, Sphere, FixTopology, EulerNumber,
RemoveIntersection, MakeSurfaces, Curvature, CurvatureStats,
Jacobian, MRIsCalc, VolumeMask, ParcellationStats, Contrast,
RelabelHypointensities, Aparc2Aseg, Apas2Aseg)
from .longitudinal import (RobustTemplate, FuseSegmentations)
from .registration import (MPRtoMNI305, RegisterAVItoTalairach, EMRegister, Register,
Paint)
| 66.888889 | 91 | 0.668328 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for freesurfer."""
from .base import Info, FSCommand, no_freesurfer
from .preprocess import (ParseDICOMDir, UnpackSDICOMDir, MRIConvert, Resample,
ReconAll, BBRegister, ApplyVolTransform, Smooth,
DICOMConvert, RobustRegister, FitMSParams,
SynthesizeFLASH, MNIBiasCorrection, WatershedSkullStrip,
Normalize, CANormalize, CARegister, CALabel, MRIsCALabel,
SegmentCC, SegmentWM, EditWMwithAseg, ConcatenateLTA)
from .model import (MRISPreproc, MRISPreprocReconAll, GLMFit, OneSampleTTest, Binarize,
Concatenate, SegStats, SegStatsReconAll, Label2Vol, MS_LDA,
Label2Label, Label2Annot, SphericalAverage)
from .utils import (SampleToSurface, SurfaceSmooth, SurfaceTransform, Surface2VolTransform,
SurfaceSnapshots, ApplyMask, MRIsConvert, MRITessellate, MRIPretess,
MRIMarchingCubes, SmoothTessellation, MakeAverageSubject,
ExtractMainComponent, Tkregister2, AddXFormToHeader,
CheckTalairachAlignment, TalairachAVI, TalairachQC, RemoveNeck,
MRIFill, MRIsInflate, Sphere, FixTopology, EulerNumber,
RemoveIntersection, MakeSurfaces, Curvature, CurvatureStats,
Jacobian, MRIsCalc, VolumeMask, ParcellationStats, Contrast,
RelabelHypointensities, Aparc2Aseg, Apas2Aseg)
from .longitudinal import (RobustTemplate, FuseSegmentations)
from .registration import (MPRtoMNI305, RegisterAVItoTalairach, EMRegister, Register,
Paint)
| 0 | 0 | 0 |
e0e9a4a6b64f80ce663be8d5634e8732072df3c3 | 211 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/valid_time.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/valid_time.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_RESOURCES/CODESIGNAL/valid_time.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | def validTime(time):
"""
Boolean indicating if time is in valid 24hr format
"""
tokens = time.split(':')
hours = int(tokens[0])
minutes = int(tokens[1])
return not (hours > 23 or minutes > 59) | 26.375 | 51 | 0.625592 | def validTime(time):
"""
Boolean indicating if time is in valid 24hr format
"""
tokens = time.split(':')
hours = int(tokens[0])
minutes = int(tokens[1])
return not (hours > 23 or minutes > 59) | 0 | 0 | 0 |
af5759f25b7a68cffdc5af17722bac8dc2f24f92 | 1,672 | py | Python | copy_csv_res.py | Murilodsv/py-jules | 601ca9e6fbad7fe82404f5296babb04583ddda95 | [
"MIT"
] | null | null | null | copy_csv_res.py | Murilodsv/py-jules | 601ca9e6fbad7fe82404f5296babb04583ddda95 | [
"MIT"
] | null | null | null | copy_csv_res.py | Murilodsv/py-jules | 601ca9e6fbad7fe82404f5296babb04583ddda95 | [
"MIT"
] | 1 | 2020-04-13T17:23:54.000Z | 2020-04-13T17:23:54.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 13:32:20 2021
#--- Copy csv results to single folder based on dashboard_db.csv
#--- Murilo Vianna (murilodsv@gmail.com)
#--- May, 2021.
#--- Dev-log in: https://github.com/Murilodsv/py-jules
@author: muril
"""
# DEBUG import os; os.chdir('C:/Murilo/py-jules')
#------------------------------#
#--- generate qsub-clusters ---#
#------------------------------#
dash_nm = 'dashboard_db.csv' # Filename of Dashboard CSV
wd_out = 'ginore/csv_res'
#--- Get scripts arguments
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
#--- use arguments
dash_nm = str(sys.argv[1]) # debug dash_nm = 'dashboard_db_future.csv'
wd_out = str(sys.argv[2])
#----------------------#
#--- Load libraries ---#
#----------------------#
import os
import util as u
import shutil
import glob
from time import time
#--- Track progress
run_start = time()
#----------------------#
#--- Read dashboard ---#
#----------------------#
#--- get run wd
wd = os.getcwd().replace('\\','/')
#--- Open CSVs
dash = u.df_csv(wd+'/'+dash_nm)
#--- list of clusters
l_ids = dash['run_id'].unique()
for i in l_ids:
print('Copying results for '+i)
#--- list csv files
l_csv = glob.glob(wd+'/jules_run/'+i+'/namelists/output/*.csv')
#--- copy every file to folder
for f in l_csv:
shutil.copyfile(f,
wd+'/'+wd_out+'/'+f.split('/')[-1].split('\\')[-1])
#--- track time
print("\nElapsed time of copying: --- %.3f seconds ---" % (time() - run_start)) | 24.588235 | 89 | 0.504785 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 13:32:20 2021
#--- Copy csv results to single folder based on dashboard_db.csv
#--- Murilo Vianna (murilodsv@gmail.com)
#--- May, 2021.
#--- Dev-log in: https://github.com/Murilodsv/py-jules
@author: muril
"""
# DEBUG import os; os.chdir('C:/Murilo/py-jules')
#------------------------------#
#--- generate qsub-clusters ---#
#------------------------------#
dash_nm = 'dashboard_db.csv' # Filename of Dashboard CSV
wd_out = 'ginore/csv_res'
#--- Get scripts arguments
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
#--- use arguments
dash_nm = str(sys.argv[1]) # debug dash_nm = 'dashboard_db_future.csv'
wd_out = str(sys.argv[2])
#----------------------#
#--- Load libraries ---#
#----------------------#
import os
import util as u
import shutil
import glob
from time import time
#--- Track progress
run_start = time()
#----------------------#
#--- Read dashboard ---#
#----------------------#
#--- get run wd
wd = os.getcwd().replace('\\','/')
#--- Open CSVs
dash = u.df_csv(wd+'/'+dash_nm)
#--- list of clusters
l_ids = dash['run_id'].unique()
for i in l_ids:
print('Copying results for '+i)
#--- list csv files
l_csv = glob.glob(wd+'/jules_run/'+i+'/namelists/output/*.csv')
#--- copy every file to folder
for f in l_csv:
shutil.copyfile(f,
wd+'/'+wd_out+'/'+f.split('/')[-1].split('\\')[-1])
#--- track time
print("\nElapsed time of copying: --- %.3f seconds ---" % (time() - run_start)) | 0 | 0 | 0 |
e9942c4758244414581d47ccf19b8ab557f3e099 | 1,177 | py | Python | orders/management/commands/order_csv_feed.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
] | null | null | null | orders/management/commands/order_csv_feed.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
] | null | null | null | orders/management/commands/order_csv_feed.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
] | null | null | null | from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import get_current_timezone
from orders.models import Order
from datetime import datetime
import csv
import pytz | 43.592593 | 145 | 0.642311 | from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import get_current_timezone
from orders.models import Order
from datetime import datetime
import csv
import pytz
class Command(BaseCommand):
    """Management command that loads rows from `<file_name>.csv` into Order."""
    help = 'Feed order_csv file into Order table.'
    def add_arguments(self, parser):
        # Positional argument: the CSV file name WITHOUT the .csv extension
        # (handle() appends '.csv' when opening the file).
        parser.add_argument('file_name', type=str, help='The csv file of order data')
    def handle(self, *args, **kwargs):
        file_name = kwargs['file_name']
        # Naive timestamps in the CSV are localized to the current timezone.
        tz = get_current_timezone()
        # print(file_name, tz)
        with open(f'{file_name}.csv') as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                # 'created_at' format example: 2021/01/31 PM 03:05:10
                datetime_object = tz.localize(datetime.strptime(row['created_at'], '%Y/%m/%d %p %I:%M:%S'))
                try:
                    order = Order(order_id=row['order_id'], customer_id=row['customer_id'], shipping=row['shipping'], created_at=datetime_object)
                    order.save()
                except Exception as e:
                    # NOTE(review): the original exception `e` is discarded
                    # here, so the CommandError hides the root cause.
                    raise CommandError('Order Save Fail!')
raise CommandError('Order Save Fail!')
self.stdout.write(self.style.SUCCESS('%s.csv Data Feed Success!' % file_name)) | 838 | 111 | 23 |
761134a35bd916ec48ce32ff5660748d454f087a | 68 | py | Python | dtale_desktop/default_sources/dft_csv/metadata.py | dennislwm/dtale-desktop | 1a034d505f6b45c1ece4c18b83af6ae367d16824 | [
"MIT"
] | 154 | 2020-10-27T00:33:51.000Z | 2022-02-19T13:16:36.000Z | dtale_desktop/default_sources/dft_csv/metadata.py | dennislwm/dtale-desktop | 1a034d505f6b45c1ece4c18b83af6ae367d16824 | [
"MIT"
] | 9 | 2020-10-26T23:48:38.000Z | 2021-02-18T04:13:42.000Z | dtale_desktop/default_sources/dft_csv/metadata.py | dennislwm/dtale-desktop | 1a034d505f6b45c1ece4c18b83af6ae367d16824 | [
"MIT"
] | 15 | 2021-01-31T01:11:20.000Z | 2022-02-17T11:41:27.000Z | import os
display_name = f"csv files in {os.path.expanduser('~')}"
| 17 | 56 | 0.691176 | import os
display_name = f"csv files in {os.path.expanduser('~')}"
| 0 | 0 | 0 |
8a65b2fac9052c66b04cfc5adb41cf66fe6c89b0 | 790 | py | Python | filter_plugins/wrap_list_elements.py | major/ansible-role-aptly | ca975fe854831c0694c0b5dc56be9ac056b7602c | [
"MIT"
] | 2 | 2017-01-24T14:58:25.000Z | 2017-05-23T08:10:28.000Z | filter_plugins/wrap_list_elements.py | major/ansible-role-aptly | ca975fe854831c0694c0b5dc56be9ac056b7602c | [
"MIT"
] | 8 | 2016-12-05T14:33:55.000Z | 2018-07-19T15:33:02.000Z | filter_plugins/wrap_list_elements.py | major/ansible-role-aptly | ca975fe854831c0694c0b5dc56be9ac056b7602c | [
"MIT"
] | 7 | 2015-11-01T15:05:09.000Z | 2019-01-18T09:35:54.000Z | from ansible import errors
#
# Additional Jinja2 filter to wrap list elements with quote
#
def wrap_list_elements(arg):
    """
    Wrap each list element with quote, to use before join filter

    :param arg: the brute list to manage
    :type arg: list
    :return: quoted elements
    :rtype: list
    """
    # Reject anything that is not exactly a list, raising the error type
    # Ansible expects from filter plugins.
    if type(arg) != list:
        raise errors.AnsibleFilterError(
            'Invalid value type "%s", list expected' % type(arg))
    return ['"{0}"'.format(element) for element in arg]
class FilterModule(object):
    """ Filters to manage aptly configuration list values"""

    # Name -> callable mapping of the filters this plugin provides.
    filter_map = {
        'wrap_list_elements': wrap_list_elements
    }

    def filters(self):
        """Return the filter mapping.

        This method is the entry point Ansible calls to discover a filter
        plugin's filters; without it, filter_map is never consumed and the
        filters are not registered.
        """
        return self.filter_map
| 21.351351 | 68 | 0.634177 | from ansible import errors
#
# Additional Jinja2 filter to wrap list elements with quote
#
def wrap_list_elements(arg):
"""
Wrap each list element with quote, to use before join filter
:param arg: the brute list to manage
:type arg: list
:return: quoted elements
:rtype: list
"""
arg_type = type(arg)
# Check if type is valid
if arg_type != list:
raise errors.AnsibleFilterError(
'Invalid value type "%s", list expected' % arg_type)
return ['"%s"' % element for element in arg]
class FilterModule(object):
""" Filters to manage aptly configuration list values"""
filter_map = {
'wrap_list_elements': wrap_list_elements
}
def filters(self):
return self.filter_map
| 28 | 0 | 27 |
b18fc2e4398b5fe692a468df028978c6863ce1e5 | 421 | py | Python | assessement_image_basic.py | JeffLabonte/Computer_Vision_Course_Example | 6da36419b424ef4b65e493f8467aa8a5ffddd30b | [
"MIT"
] | null | null | null | assessement_image_basic.py | JeffLabonte/Computer_Vision_Course_Example | 6da36419b424ef4b65e493f8467aa8a5ffddd30b | [
"MIT"
] | null | null | null | assessement_image_basic.py | JeffLabonte/Computer_Vision_Course_Example | 6da36419b424ef4b65e493f8467aa8a5ffddd30b | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Load the sample image (OpenCV reads images in BGR channel order).
img = cv2.imread("Computer-Vision-with-Python/DATA/dog_backpack.png")
plt.imshow(img)
# Convert BGR -> RGB so matplotlib displays colors correctly.
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb)
# Work on a copy, flipped with flip code 0 (around the x-axis).
new_img = img_rgb.copy()
new_img = cv2.flip(new_img, 0)
plt.imshow(new_img)
# Draw a rectangle with RGB color (255, 0, 0) on img_rgb in place.
pt1 = (200, 380)
pt2 = (600, 700)
cv2.rectangle(img_rgb, pt1=pt1, pt2=pt2, color=(255,0,0), thickness=10)
plt.imshow(img_rgb) | 22.157895 | 71 | 0.743468 | import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("Computer-Vision-with-Python/DATA/dog_backpack.png")
plt.imshow(img)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb)
new_img = img_rgb.copy()
new_img = cv2.flip(new_img, 0)
plt.imshow(new_img)
pt1 = (200, 380)
pt2 = (600, 700)
cv2.rectangle(img_rgb, pt1=pt1, pt2=pt2, color=(255,0,0), thickness=10)
plt.imshow(img_rgb) | 0 | 0 | 0 |
04a4848fd3a50deebad0f5129e982bbc9c0a69b8 | 150 | py | Python | points/urls.py | rmishra7/cms | 06898eabe708254eedfa410694f86396fcf69f53 | [
"MIT"
] | null | null | null | points/urls.py | rmishra7/cms | 06898eabe708254eedfa410694f86396fcf69f53 | [
"MIT"
] | null | null | null | points/urls.py | rmishra7/cms | 06898eabe708254eedfa410694f86396fcf69f53 | [
"MIT"
] | null | null | null |
from django.conf.urls import url
from points import apis
urlpatterns = [
url(r'^$', apis.PointsTableApi.as_view(), name="api_points_table")
]
| 15 | 70 | 0.713333 |
from django.conf.urls import url
from points import apis
urlpatterns = [
url(r'^$', apis.PointsTableApi.as_view(), name="api_points_table")
]
| 0 | 0 | 0 |
e6377ef30a571471f18d96d915d9f9353c8eae6b | 3,734 | py | Python | MyGame/code/tile_box.py | Chad474/2dPyGame | 22d2c19a5407fa4b539b772facfc5c08e6860ddd | [
"MIT"
] | null | null | null | MyGame/code/tile_box.py | Chad474/2dPyGame | 22d2c19a5407fa4b539b772facfc5c08e6860ddd | [
"MIT"
] | null | null | null | MyGame/code/tile_box.py | Chad474/2dPyGame | 22d2c19a5407fa4b539b772facfc5c08e6860ddd | [
"MIT"
] | null | null | null | import pygame
from tile_movingbox import MovingBox
| 25.751724 | 79 | 0.610337 | import pygame
from tile_movingbox import MovingBox
def check_if_box(block):
    """Return True when *block* is exactly one of the box sprite types."""
    # Exact type comparison (not isinstance), matching the original intent
    # of distinguishing each concrete box class.
    return type(block) in (Box, JumpBox, MovingBox)
def static_boxes(box_list):
    """Return True only when no box in the sprite group is in the 'AIR' state."""
    return all(box.state != 'AIR' for box in box_list.sprites())
class Box(pygame.sprite.Sprite):
    """A pushable box sprite moved one step at a time.

    Movement model: calc_movement() gives the box a constant velocity and
    flags it 'AIR' (moving); update() then integrates the velocity and
    resolves collisions until a hit snaps the box flush and sets 'STOP'.

    NOTE(review): the methods rely on `self.level` (a sprite group that
    appears to include this box itself) and `self.ID` (a unique tag used to
    skip self-collisions) being assigned externally after construction --
    neither is set in __init__; confirm against the caller.
    """
    # Current velocity in pixels per update tick.
    spd_x = 0
    spd_y = 0
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('images/tiles/box.png').convert()
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.state = 'STOP'
        # Remember the spawn position so reboot() can restore it.
        self.init_x = x
        self.init_y = y
    def reboot(self, movement):
        # Reset to the spawn position and stop all motion.
        # (`movement` is unused; kept for signature symmetry with update().)
        self.rect.x = self.init_x
        self.rect.y = self.init_y
        self.spd_y = 0
        self.spd_x = 0
        self.state = 'STOP'
    def calc_movement(self, movement):
        # Start moving in the requested direction, but only when that side is
        # clear and the box has no vertical velocity.
        if movement == 'UP':
            if not self.touch_UP() and self.spd_y == 0:
                self.spd_y = -4
                self.state = 'AIR'
        elif movement == 'DN':
            if not self.touch_DN() and self.spd_y == 0:
                self.spd_y = 4
                self.state = 'AIR'
        elif movement == 'RI':
            if not self.touch_RI() and self.spd_y == 0:
                self.spd_x = 4
                self.state = 'AIR'
        elif movement == 'LE':
            if not self.touch_LE() and self.spd_y == 0:
                self.spd_x = -4
                self.state = 'AIR'
    def touch_UP(self):
        # Probe one pixel up, collide against the level group, then restore.
        # The `<= 1` threshold discounts the box's own collision with itself.
        self.rect.y -=1
        hit_list = pygame.sprite.spritecollide(self, self.level, False)
        self.rect.y +=1
        if len(hit_list) <= 1:
            return False
        return True
    def touch_DN(self):
        # Probe one pixel down (see touch_UP for the threshold rationale).
        self.rect.y +=1
        hit_list = pygame.sprite.spritecollide(self, self.level, False)
        self.rect.y -=1
        if len(hit_list) <= 1:
            return False
        return True
    def touch_RI(self):
        # Probe one pixel to the right.
        self.rect.x +=1
        hit_list = pygame.sprite.spritecollide(self, self.level, False)
        self.rect.x -=1
        if len(hit_list) <= 1:
            return False
        return True
    def touch_LE(self):
        # Probe one pixel to the left.
        self.rect.x -=1
        hit_list = pygame.sprite.spritecollide(self, self.level, False)
        self.rect.x +=1
        if len(hit_list) <= 1:
            return False
        return True
    def collision_y(self):
        # Snap flush against whatever we hit vertically (ignoring ourselves
        # via the ID check) and stop.
        hit_list = pygame.sprite.spritecollide(self, self.level, False)
        for block in hit_list:
            if block.ID != self.ID:
                if self.spd_y > 0:
                    self.rect.bottom = block.rect.top
                elif self.spd_y < 0:
                    self.rect.top = block.rect.bottom
                self.spd_y = 0
                self.state = 'STOP'
    def collision_x(self):
        # Horizontal counterpart of collision_y().
        hit_list = pygame.sprite.spritecollide(self, self.level, False)
        for block in hit_list:
            if block.ID != self.ID:
                if self.spd_x > 0:
                    self.rect.right = block.rect.left
                elif self.spd_x < 0:
                    self.rect.left = block.rect.right
                self.spd_x = 0
                self.state = 'STOP'
    def update(self, movement):
        # JumpBox instances never move: update() is a deliberate no-op for them.
        if type(self) == JumpBox:
            pass
        elif self.state == 'STOP':
            self.calc_movement(movement)
        else:
            # While airborne: integrate and resolve each axis in turn.
            self.rect.y += self.spd_y
            self.collision_y()
            self.rect.x += self.spd_x
            self.collision_x()
class JumpBox(Box):
    """A Box variant with its own image; Box.update() is a no-op for this
    type, so a JumpBox never moves on its own."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load('images/tiles/jumpbox.png').convert()
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        self.state = 'STOP'
        # Spawn position, restored by the inherited reboot().
        self.init_x = x
        self.init_y = y
class Box_Stopper(pygame.sprite.Sprite):
    """A static sprite (box filter tile) placed at a fixed position."""
    # Velocity fields kept for interface parity with the box classes.
    spd_x = 0
    spd_y = 0
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        # convert_alpha() preserves the image's per-pixel transparency.
        self.image = pygame.image.load('images/tiles/boxfilter.png').convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.y = y
self.rect.x = x | 3,127 | 402 | 151 |
ee69d4394b07343fbd1a670c78a0e3edc5afb890 | 1,221 | py | Python | osmaxx/excerptexport/urls.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 27 | 2015-03-30T14:17:26.000Z | 2022-02-19T17:30:44.000Z | osmaxx/excerptexport/urls.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 483 | 2015-03-09T16:58:03.000Z | 2022-03-14T09:29:06.000Z | osmaxx/excerptexport/urls.py | tyrasd/osmaxx | da4454083d17b2ef8b0623cad62e39992b6bd52a | [
"MIT"
] | 6 | 2015-04-07T07:38:30.000Z | 2020-04-01T12:45:53.000Z | from django.conf.urls import url
from django.contrib.auth.views import login, logout
from django.views.generic import TemplateView
from osmaxx.excerptexport.views import (
delete_excerpt,
export_list,
export_detail,
manage_own_excerpts,
order_new_excerpt,
order_existing_excerpt,
)
excerpt_export_urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="excerptexport/templates/index.html"), name='index'),
url(r'^exports/$', export_list, name='export_list'),
url(r'^exports/(?P<id>[A-Za-z0-9_-]+)/$', export_detail, name='export_detail'),
url(r'^orders/new/new_excerpt/$', order_new_excerpt, name='order_new_excerpt'),
url(r'^orders/new/existing_excerpt/$', order_existing_excerpt, name='order_existing_excerpt'),
url(r'^excerpts/(?P<pk>[A-Za-z0-9_-]+)/delete/$', delete_excerpt, name='delete_excerpt'),
url(r'^excerpts/$', manage_own_excerpts, name='manage_own_excerpts'),
]
login_logout_patterns = [
url(r'^login/$', login,
{'template_name': 'osmaxx/login.html'}, name='login'),
url(r'^logout/$', logout,
{'template_name': 'osmaxx/logout.html'}, name='logout'),
]
urlpatterns = excerpt_export_urlpatterns + login_logout_patterns
| 34.885714 | 103 | 0.710893 | from django.conf.urls import url
from django.contrib.auth.views import login, logout
from django.views.generic import TemplateView
from osmaxx.excerptexport.views import (
delete_excerpt,
export_list,
export_detail,
manage_own_excerpts,
order_new_excerpt,
order_existing_excerpt,
)
excerpt_export_urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="excerptexport/templates/index.html"), name='index'),
url(r'^exports/$', export_list, name='export_list'),
url(r'^exports/(?P<id>[A-Za-z0-9_-]+)/$', export_detail, name='export_detail'),
url(r'^orders/new/new_excerpt/$', order_new_excerpt, name='order_new_excerpt'),
url(r'^orders/new/existing_excerpt/$', order_existing_excerpt, name='order_existing_excerpt'),
url(r'^excerpts/(?P<pk>[A-Za-z0-9_-]+)/delete/$', delete_excerpt, name='delete_excerpt'),
url(r'^excerpts/$', manage_own_excerpts, name='manage_own_excerpts'),
]
login_logout_patterns = [
url(r'^login/$', login,
{'template_name': 'osmaxx/login.html'}, name='login'),
url(r'^logout/$', logout,
{'template_name': 'osmaxx/logout.html'}, name='logout'),
]
urlpatterns = excerpt_export_urlpatterns + login_logout_patterns
| 0 | 0 | 0 |
b6cb7e0878363791464914093ee1e35bdf2144d8 | 1,905 | py | Python | tests/unit/test_config.py | paulopes/runningtrack | ce01730fd8e92f5411d45705e6a857eedd7374cd | [
"Apache-2.0"
] | 2 | 2019-04-22T03:16:08.000Z | 2019-04-22T22:09:30.000Z | tests/unit/test_config.py | paulopes/runningtrack | ce01730fd8e92f5411d45705e6a857eedd7374cd | [
"Apache-2.0"
] | 4 | 2018-11-10T21:16:16.000Z | 2018-11-10T23:18:59.000Z | tests/unit/test_config.py | paulopes/runningtrack | ce01730fd8e92f5411d45705e6a857eedd7374cd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
""" Unit tests for the running.config module
"""
from __future__ import print_function, division, unicode_literals
import os
import running.config as config
def test_load_config_ini():
    """ Load a test configuration file in .ini format, and
        check if the DEFAULT section propagated correctly.
    """
    # Expected sections after DEFAULT propagation; the fixture lives at
    # tests/files/load_config_ini.ini.
    final_dict_should_be = {
        "ABC": {
            "def": '123',
            "ghi": 'a okay',
            "vwx": 'one',
            "yz": 'two',
        },
        "MNO": {
            "pqr": 'yes',
            "vwx": 'one',
            "yz": 'two',
            "def": '456',
        },
    }
    test_file_path = os.path.join('tests', 'files', 'load_config_ini.ini')
    loaded_dict = dict(config.load_config_ini(test_file_path))
    # Convert each section mapping to a plain dict so == compares by content.
    for item in loaded_dict:
        loaded_dict[item] = dict(loaded_dict[item])
    assert loaded_dict == final_dict_should_be
def test_merge_defaults():
    """ A dictionary that has an item with a "DEFAULT" key, if
        that item is itself a dictionary, then it should merge
        that item's subitems with all the other items in the
        dictionary that are also themselves dictionaries.
    """
    # Input: "DEFAULT" holds the fallbacks; "JKL" is not a dict and must be
    # left untouched by the merge.
    original_dict = {
        "ABC": {
            "def": 123,
            "ghi": 'a okay',
        },
        "JKL": 9.25,
        "MNO": {
            "pqr": True,
        },
        "DEFAULT": {
            "vwx": 'one',
            "yz": 'two',
            "def": 456,
        },
    }
    merged_dict = dict(config.merge_defaults(original_dict))
    # Expected: "ABC" keeps its own "def" (123) over DEFAULT's 456, "MNO"
    # (which lacks "def") gains 456, and the "DEFAULT" entry itself is gone.
    merged_dict_should_be = {
        "ABC": {
            "def": 123,
            "ghi": 'a okay',
            "vwx": 'one',
            "yz": 'two',
        },
        "JKL": 9.25,
        "MNO": {
            "pqr": True,
            "vwx": 'one',
            "yz": 'two',
            "def": 456,
        },
    }
    assert merged_dict == merged_dict_should_be
| 25.065789 | 74 | 0.493438 | # -*- coding: utf-8 -*-
""" Unit tests for the running.config module
"""
from __future__ import print_function, division, unicode_literals
import os
import running.config as config
def test_load_config_ini():
""" Load a test configuration file in .ini format, and
check if the DEFAULT section propagated correctly.
"""
final_dict_should_be = {
"ABC": {
"def": '123',
"ghi": 'a okay',
"vwx": 'one',
"yz": 'two',
},
"MNO": {
"pqr": 'yes',
"vwx": 'one',
"yz": 'two',
"def": '456',
},
}
test_file_path = os.path.join('tests', 'files', 'load_config_ini.ini')
loaded_dict = dict(config.load_config_ini(test_file_path))
for item in loaded_dict:
loaded_dict[item] = dict(loaded_dict[item])
assert loaded_dict == final_dict_should_be
def test_merge_defaults():
""" A dictionary that has an item with a "DEFAULT" key, if
that item is itself a dictionary, then it should merge
that item's subitems with all the other items in the
dictionary that are also themselves dictionaries.
"""
original_dict = {
"ABC": {
"def": 123,
"ghi": 'a okay',
},
"JKL": 9.25,
"MNO": {
"pqr": True,
},
"DEFAULT": {
"vwx": 'one',
"yz": 'two',
"def": 456,
},
}
merged_dict = dict(config.merge_defaults(original_dict))
merged_dict_should_be = {
"ABC": {
"def": 123,
"ghi": 'a okay',
"vwx": 'one',
"yz": 'two',
},
"JKL": 9.25,
"MNO": {
"pqr": True,
"vwx": 'one',
"yz": 'two',
"def": 456,
},
}
assert merged_dict == merged_dict_should_be
| 0 | 0 | 0 |
3f58e8080b40914eaea3d6b88428011db919a95a | 630 | py | Python | tests/testapp/views.py | matthiask/feincms3-downloads | 962bf2574feb1627a0cf794dd20e038377fb4c1e | [
"MIT"
] | 2 | 2021-04-12T10:36:34.000Z | 2021-10-03T12:17:57.000Z | tests/testapp/views.py | matthiask/feincms3-downloads | 962bf2574feb1627a0cf794dd20e038377fb4c1e | [
"MIT"
] | null | null | null | tests/testapp/views.py | matthiask/feincms3-downloads | 962bf2574feb1627a0cf794dd20e038377fb4c1e | [
"MIT"
] | 1 | 2019-09-29T05:58:25.000Z | 2019-09-29T05:58:25.000Z | from django.shortcuts import get_object_or_404, render
from feincms3 import plugins
from feincms3.regions import Regions
from feincms3.renderer import TemplatePluginRenderer
from .models import HTML, Article, Download
renderer = TemplatePluginRenderer()
renderer.register_string_renderer(HTML, plugins.html.render_html)
renderer.register_template_renderer(Download, "plugins/download.html")
| 31.5 | 87 | 0.774603 | from django.shortcuts import get_object_or_404, render
from feincms3 import plugins
from feincms3.regions import Regions
from feincms3.renderer import TemplatePluginRenderer
from .models import HTML, Article, Download
renderer = TemplatePluginRenderer()
renderer.register_string_renderer(HTML, plugins.html.render_html)
renderer.register_template_renderer(Download, "plugins/download.html")
def article_detail(request, pk):
article = get_object_or_404(Article, pk=pk)
return render(
request,
"article.html",
{"article": article, "regions": Regions.from_item(article, renderer=renderer)},
)
| 213 | 0 | 23 |
396df33f8d1ede98400b2c34baa926f24ec900d4 | 578 | py | Python | scantron/tests.py | suhelhammoud/scantron | fcf64d973342cd63acf5235ce621fed02aeae47d | [
"MIT"
] | null | null | null | scantron/tests.py | suhelhammoud/scantron | fcf64d973342cd63acf5235ce621fed02aeae47d | [
"MIT"
] | null | null | null | scantron/tests.py | suhelhammoud/scantron | fcf64d973342cd63acf5235ce621fed02aeae47d | [
"MIT"
] | null | null | null | from django.test import TestCase
from .resources import StudentResource
from .models import Student
# TODO modify code to do REAL testing | 34 | 64 | 0.65917 | from django.test import TestCase
from .resources import StudentResource
from .models import Student
# TODO modify code to do REAL testing
class StudetTest(TestCase):
def setUp(self):
Student.objects.create(name="Sami", std_id = "2011300")
Student.objects.create(name="Ahmad", std_id = "2011500")
Student.objects.create(name="Omar", std_id = "2011400")
def test_student_export(self):
"""Student Resource"""
ds = StudentResource().export()
print(ds.json)
print(ds.csv)
self.assertEqual('Sami' , 'Sami') # | 188 | 230 | 22 |
427fe59299333a0f870405e8974d8aeb4a098740 | 1,587 | py | Python | deeplabcut/pose_estimation_tensorflow/dataset/pose_dataset.py | serre-lab/deeplabcut_mgh | aa37b104ba4967932528d4f79665648474f51112 | [
"MIT"
] | null | null | null | deeplabcut/pose_estimation_tensorflow/dataset/pose_dataset.py | serre-lab/deeplabcut_mgh | aa37b104ba4967932528d4f79665648474f51112 | [
"MIT"
] | 1 | 2019-12-15T00:37:15.000Z | 2019-12-15T00:37:15.000Z | deeplabcut/pose_estimation_tensorflow/dataset/pose_dataset.py | kalpitthakkar/deeplabcut_mgh_pose | 8fa4a59f422ff0357552e290230838239edcfe1b | [
"MIT"
] | null | null | null | '''
Adapted from DeeperCut by Eldar Insafutdinov
https://github.com/eldar/pose-tensorflow
'''
from enum import Enum
import numpy as np
# Augmentation functions
def CropImage(joints,im,Xlabel,Ylabel,cfg):
    ''' Randomly cropping image around xlabel,ylabel taking into account size of image. Introduced in DLC 2 '''
    # Draw the crop extents; each side reaches at least cfg["minsize"] pixels
    # from the label point (the four randint calls keep the original order so
    # RNG consumption is unchanged).
    right = int(cfg["minsize"] + np.random.randint(cfg["rightwidth"]))
    left = int(cfg["minsize"] + np.random.randint(cfg["leftwidth"]))
    top = int(cfg["minsize"] + np.random.randint(cfg["topheight"]))
    bottom = int(cfg["minsize"] + np.random.randint(cfg["bottomheight"]))
    height = np.shape(im)[0]
    width = np.shape(im)[1]
    x_lo = max(0, int(Xlabel - left))
    x_hi = min(width - 1, int(Xlabel + right))
    y_lo = max(0, int(Ylabel - bottom))
    y_hi = min(height - 1, int(Ylabel + top))
    # Shift joint coordinates into the cropped frame (in place, as before).
    joints[0, :, 1] -= x_lo
    joints[0, :, 2] -= y_lo
    keep = ((joints[0, :, 1] > 0) & (joints[0, :, 1] < width)
            & (joints[0, :, 2] > 0) & (joints[0, :, 2] < height))
    inbounds = np.where(keep)[0]
    return joints[:, inbounds, :], im[y_lo:y_hi + 1, x_lo:x_hi + 1, :]
| 31.117647 | 125 | 0.672968 | '''
Adapted from DeeperCut by Eldar Insafutdinov
https://github.com/eldar/pose-tensorflow
'''
from enum import Enum
import numpy as np
class Batch(Enum):
inputs = 0
part_score_targets = 1
part_score_weights = 2
locref_targets = 3
locref_mask = 4
pairwise_targets = 5
pairwise_mask = 6
data_item = 7
class DataItem:
pass
def data_to_input(data):
return np.expand_dims(data, axis=0).astype(float)
# Augmentation functions
def mirror_joints_map(all_joints, num_joints):
res = np.arange(num_joints)
symmetric_joints = [p for p in all_joints if len(p) == 2]
for pair in symmetric_joints:
res[pair[0]] = pair[1]
res[pair[1]] = pair[0]
return res
def CropImage(joints,im,Xlabel,Ylabel,cfg):
''' Randomly cropping image around xlabel,ylabel taking into account size of image. Introduced in DLC 2 '''
widthforward=int(cfg["minsize"]+np.random.randint(cfg["rightwidth"]))
widthback=int(cfg["minsize"]+np.random.randint(cfg["leftwidth"]))
hup=int(cfg["minsize"]+np.random.randint(cfg["topheight"]))
hdown=int(cfg["minsize"]+np.random.randint(cfg["bottomheight"]))
Xstart=max(0,int(Xlabel-widthback))
Xstop=min(np.shape(im)[1]-1,int(Xlabel+widthforward))
Ystart=max(0,int(Ylabel-hdown))
Ystop=min(np.shape(im)[0]-1,int(Ylabel+hup))
joints[0,:,1]-=Xstart
joints[0,:,2]-=Ystart
inbounds=np.where((joints[0,:,1]>0)*(joints[0,:,1]<np.shape(im)[1])*(joints[0,:,2]>0)*(joints[0,:,2]<np.shape(im)[0]))[0]
return joints[:,inbounds,:],im[Ystart:Ystop+1,Xstart:Xstop+1,:]
| 287 | 177 | 91 |
a0a41812b1925cad41bf397f94704699fb2e3b66 | 3,096 | py | Python | tests/test_kind.py | lycantropos/shewchuk | 0091891e57d6700d71ad38d6c445397a2c53c2b7 | [
"MIT"
] | null | null | null | tests/test_kind.py | lycantropos/shewchuk | 0091891e57d6700d71ad38d6c445397a2c53c2b7 | [
"MIT"
] | null | null | null | tests/test_kind.py | lycantropos/shewchuk | 0091891e57d6700d71ad38d6c445397a2c53c2b7 | [
"MIT"
] | null | null | null | from typing import Tuple
from hypothesis import given
from shewchuk import (kind,
vectors_dot_product)
from tests.utils import (exact_kind,
to_sign)
from . import strategies
@given(strategies.floats_sextuplets)
@given(strategies.floats_quadruplets)
@given(strategies.floats_sextuplets)
@given(strategies.floats_sextuplets)
| 40.736842 | 76 | 0.676034 | from typing import Tuple
from hypothesis import given
from shewchuk import (kind,
vectors_dot_product)
from tests.utils import (exact_kind,
to_sign)
from . import strategies
@given(strategies.floats_sextuplets)
def test_basic(sextuplet: Tuple[float, float, float, float, float, float]
) -> None:
(vertex_x, vertex_y, first_ray_second_ray_point_x,
first_ray_second_ray_point_y, second_ray_point_x,
second_ray_point_y) = sextuplet
result = kind(vertex_x, vertex_y, first_ray_second_ray_point_x,
first_ray_second_ray_point_y, second_ray_point_x,
second_ray_point_y)
assert isinstance(result, int)
assert result in (-1, 0, 1)
@given(strategies.floats_quadruplets)
def test_endpoints(quadruplet: Tuple[float, float, float, float]) -> None:
    """Degenerate rays: a ray back onto the vertex gives 0, while two
    identical rays give 1 unless they coincide with the vertex itself."""
    vx, vy, px, py = quadruplet
    assert kind(vx, vy, px, py, vx, vy) == 0
    same_rays = kind(vx, vy, px, py, px, py)
    rays_leave_vertex = vx != px or vy != py
    assert same_rays == rays_leave_vertex
@given(strategies.floats_sextuplets)
def test_endpoints_permutation(sextuplet: Tuple[float, float, float, float,
                                                float, float]) -> None:
    """Swapping the two ray points leaves the result unchanged."""
    vx, vy, ax, ay, bx, by = sextuplet
    forward = kind(vx, vy, ax, ay, bx, by)
    swapped = kind(vx, vy, bx, by, ax, ay)
    assert forward == swapped
@given(strategies.floats_sextuplets)
def test_alternatives(sextuplet: Tuple[float, float, float, float, float,
                                       float]) -> None:
    """``kind`` agrees with the sign of the rays' dot product and with the
    exact reference implementation."""
    vx, vy, ax, ay, bx, by = sextuplet
    outcome = kind(vx, vy, ax, ay, bx, by)
    dot = vectors_dot_product(vx, vy, ax, ay, vx, vy, bx, by)
    assert outcome == to_sign(dot)
    assert outcome == exact_kind(vx, vy, ax, ay, bx, by)
| 2,628 | 0 | 88 |
62e477f0f1ec380d60c2fbcd1f119ae3c51edaf5 | 486 | py | Python | setup.py | csmarfan/petpy | d5edaea43a6657e6a93a1f01f3dd27e9e8fbb0cf | [
"Apache-2.0"
] | null | null | null | setup.py | csmarfan/petpy | d5edaea43a6657e6a93a1f01f3dd27e9e8fbb0cf | [
"Apache-2.0"
] | 2 | 2019-06-19T12:16:12.000Z | 2019-06-19T12:38:02.000Z | setup.py | csmarfan/petpy | d5edaea43a6657e6a93a1f01f3dd27e9e8fbb0cf | [
"Apache-2.0"
] | 1 | 2019-06-19T12:27:06.000Z | 2019-06-19T12:27:06.000Z | from setuptools import setup
setup(name='petpy',
version='0.1',
description='Petrophysics utilities',
url='https://example.com/',
author = 'Fan',
author_email='yuanzhong.fan@shell.com',
license = 'Apache 2',
pakages=['petpy'],
install_requires=['numpy'],
test_require = ['pytest','pytest-cov'],
entry_points={'console_scripts':
['gardner=petpy.__main__:main',
]}
) | 30.375 | 47 | 0.54321 | from setuptools import setup
setup(name='petpy',
version='0.1',
description='Petrophysics utilities',
url='https://example.com/',
author = 'Fan',
author_email='yuanzhong.fan@shell.com',
license = 'Apache 2',
pakages=['petpy'],
install_requires=['numpy'],
test_require = ['pytest','pytest-cov'],
entry_points={'console_scripts':
['gardner=petpy.__main__:main',
]}
) | 0 | 0 | 0 |
1f38fc45c9f57f950dc20432a34d9b30298bfa71 | 2,514 | py | Python | dask_saturn/backoff.py | saturncloud/dask-saturn | 9c9733f68fee26e60f544ab9653bc9056b7543e5 | [
"BSD-3-Clause"
] | 12 | 2020-05-19T17:18:06.000Z | 2022-03-24T01:25:57.000Z | dask_saturn/backoff.py | saturncloud/dask-saturn | 9c9733f68fee26e60f544ab9653bc9056b7543e5 | [
"BSD-3-Clause"
] | 32 | 2020-04-01T13:43:38.000Z | 2021-12-21T18:42:13.000Z | dask_saturn/backoff.py | saturncloud/dask-saturn | 9c9733f68fee26e60f544ab9653bc9056b7543e5 | [
"BSD-3-Clause"
] | 3 | 2020-04-28T13:52:22.000Z | 2021-09-15T02:03:28.000Z | """
Lightweight implementation of exponential backoff,
used for operations that require polling. This is simple enough
that it isn't worth bringing in a new dependency for it.
"""
from time import sleep
from datetime import datetime
from math import ceil
from random import randrange
class ExpBackoff:
"""
``SaturnCluster._start()`` requires polling until the
Dask scheduled comes up. Exponential backoff is better
in these situations than fixed-wait-time polling, because
it minimizes the number of requests that need to be
made from the beginning of polling to the time the
scheduler is up.
"""
def __init__(self, wait_timeout: int = 1200, min_sleep: int = 5, max_sleep: int = 60):
"""
Used to generate sleep times with a capped exponential backoff.
Jitter reduces contention on the event of multiple clients making
these calls at the same time.
:param wait_timeout: Maximum total time in seconds to wait before timing out
:param min_sleep: Minimum amount of time to sleep in seconds
:param max_sleep: Maximum time to sleep over one period in seconds
:return: Boolean indicating if current wait time is less than wait_timeout
"""
self.wait_timeout = wait_timeout
self.max_sleep = max_sleep
self.min_sleep = min_sleep
self.retries = 0
self.start_time = None
def wait(self) -> bool:
"""
This methods returns ``False`` if the timeout has been
exceeded and code that is using ``ExpBackoff`` for polling
should just consider the polling failed.
If there there is still time left until
``self.wait_timeout``, waits for some time and then
returns ``True``.
"""
if self.retries == 0:
self.start_time = datetime.now()
# Check if timeout has been reached
time_delta = (datetime.now() - self.start_time).total_seconds()
if time_delta >= self.wait_timeout:
return False
# Generate exp backoff with jitter
self.retries += 1
backoff = min(self.max_sleep, self.min_sleep * 2 ** self.retries) / 2
jitter = randrange(0, ceil(backoff))
wait_time = backoff + jitter
# Make sure we aren't waiting longer than wait_timeout
remaining_time = self.wait_timeout - time_delta
if remaining_time < wait_time:
wait_time = remaining_time
sleep(wait_time)
return True
| 35.408451 | 90 | 0.663087 | """
Lightweight implementation of exponential backoff,
used for operations that require polling. This is simple enough
that it isn't worth bringing in a new dependency for it.
"""
from time import sleep
from datetime import datetime
from math import ceil
from random import randrange
class ExpBackoff:
    """
    ``SaturnCluster._start()`` requires polling until the
    Dask scheduled comes up. Exponential backoff is better
    in these situations than fixed-wait-time polling, because
    it minimizes the number of requests that need to be
    made from the beginning of polling to the time the
    scheduler is up.
    """
    def __init__(self, wait_timeout: int = 1200, min_sleep: int = 5, max_sleep: int = 60):
        """
        Used to generate sleep times with a capped exponential backoff.
        Jitter reduces contention on the event of multiple clients making
        these calls at the same time.
        :param wait_timeout: Maximum total time in seconds to wait before timing out
        :param min_sleep: Minimum amount of time to sleep in seconds
        :param max_sleep: Maximum time to sleep over one period in seconds
        (The boolean indicating whether the timeout has been reached is
        returned by :meth:`wait`, not by the constructor.)
        """
        self.wait_timeout = wait_timeout
        self.max_sleep = max_sleep
        self.min_sleep = min_sleep
        # Number of wait() calls made so far.
        self.retries = 0
        # Timestamp of the first wait() call; None until polling starts.
        self.start_time = None
    def wait(self) -> bool:
        """
        This method returns ``False`` if the timeout has been
        exceeded and code that is using ``ExpBackoff`` for polling
        should just consider the polling failed.
        If there is still time left until
        ``self.wait_timeout``, waits for some time and then
        returns ``True``.
        """
        # The first call anchors the timeout clock.
        if self.retries == 0:
            self.start_time = datetime.now()
        # Check if timeout has been reached
        time_delta = (datetime.now() - self.start_time).total_seconds()
        if time_delta >= self.wait_timeout:
            return False
        # Generate exp backoff with jitter
        self.retries += 1
        backoff = min(self.max_sleep, self.min_sleep * 2 ** self.retries) / 2
        # NOTE(review): randrange(0, ceil(backoff)) raises ValueError when
        # backoff < 1 (e.g. min_sleep=0); fine with the default parameters.
        jitter = randrange(0, ceil(backoff))
        wait_time = backoff + jitter
        # Make sure we aren't waiting longer than wait_timeout
        remaining_time = self.wait_timeout - time_delta
        if remaining_time < wait_time:
            wait_time = remaining_time
        sleep(wait_time)
        return True
| 0 | 0 | 0 |
bec83fb8bd6c75559aaf20eeef95d3956142f1c5 | 3,015 | py | Python | gazeclassify/tests/unit/test_OpenCVVideoReader.py | Flow000/gazeclassify | dda4c8cd62ad84615f4272171f1635ab683f9bed | [
"MIT"
] | 6 | 2021-02-25T01:17:09.000Z | 2022-03-19T07:13:52.000Z | gazeclassify/tests/unit/test_OpenCVVideoReader.py | Flow000/gazeclassify | dda4c8cd62ad84615f4272171f1635ab683f9bed | [
"MIT"
] | 3 | 2021-05-10T07:38:24.000Z | 2021-06-07T12:59:29.000Z | gazeclassify/tests/unit/test_OpenCVVideoReader.py | Flow000/gazeclassify | dda4c8cd62ad84615f4272171f1635ab683f9bed | [
"MIT"
] | 1 | 2021-06-24T12:58:01.000Z | 2021-06-24T12:58:01.000Z | import io
from dataclasses import dataclass
import cv2 # type: ignore
import numpy as np # type: ignore
from PIL import Image # type: ignore
@dataclass
| 34.261364 | 107 | 0.668657 | import io
from dataclasses import dataclass
import cv2 # type: ignore
import numpy as np # type: ignore
from PIL import Image # type: ignore
@dataclass
class VideoHandle:
    """Bundles an OpenCV capture stream with its basic video properties."""
    stream: cv2.VideoCapture
    width: int   # frame width in pixels
    height: int  # frame height in pixels
    fps: int     # frames per second
class TestOpenCVVideoReader:
    """Exploratory tests of the OpenCV video read/write API against the
    fixture clips under ``gazeclassify/example_data``."""
    def test_read_two_video_frames_opencv_results_in_two_frames(self) -> None:
        """Reading until failure counts exactly the clip's two frames."""
        capture = cv2.VideoCapture("gazeclassify/example_data/two_frames.mp4")
        # Starts at -1 and increments once per read (including the failing
        # one), so the final value equals the number of successful reads.
        frame_counter = -1
        while True:
            has_frame, frame = capture.read()
            frame_counter += 1
            if not has_frame:
                break
        # NOTE(review): capture is never released here, unlike the other tests.
        assert frame_counter == 2
    def test_read_videoframe_to_bytes_and_back_to_image(self) -> None:
        """A frame survives a JPEG-bytes round trip with its shape intact."""
        capture = cv2.VideoCapture("gazeclassify/example_data/frame.mp4")
        _, frame = capture.read()
        # https://stackoverflow.com/a/64849668 Convert to bytes
        bytesframe = cv2.imencode('.jpg', frame)[1].tobytes()
        image = cv2.imdecode(np.frombuffer(bytesframe, np.uint8), cv2.IMREAD_COLOR)
        rgb_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        # NOTE(review): img_converted is unused; it only demonstrates that
        # PIL accepts the converted array.
        img_converted = Image.fromarray(rgb_image)
        capture.release()
        assert frame.shape == rgb_image.shape
    def test_read_videoframe_to_bytes_to_bytesIO_back_to_bytes_and_back_to_image(self) -> None:
        """Same round trip as above, but passing through a BytesIO buffer."""
        capture = cv2.VideoCapture("gazeclassify/example_data/frame.mp4")
        has_frames, frame = capture.read()
        # https://stackoverflow.com/a/64849668 Convert to bytes
        bytesframe = cv2.imencode('.jpg', frame)[1].tobytes()
        # Convert to BytesIO and back
        bytesioframe = io.BytesIO(bytesframe)
        # NOTE(review): reconvert is unused; the decode below reads the
        # original bytes rather than the BytesIO round-trip result.
        reconvert = bytesioframe.getvalue()
        # Read image with cv2
        image = cv2.imdecode(np.frombuffer(bytesframe, np.uint8), cv2.IMREAD_COLOR)
        rgb_image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        capture.release()
        assert frame.shape == rgb_image.shape
    def test_get_video_width_and_height_and_fps(self) -> None:
        """Capture properties match the known fixture clip metadata."""
        capture = cv2.VideoCapture("gazeclassify/example_data/frame.mp4")
        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(capture.get(cv2.CAP_PROP_FPS))
        frame_nr = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        capture.release()
        # Expected values are the fixture clip's properties.
        assert width == 1088
        assert height == 1080
        assert fps == 1
        assert frame_nr == 1
    def test_write_video(self) -> None:
        """A frame read from the fixture can be written back out via
        VideoWriter (writes frame_export.mp4 into the example_data dir)."""
        capture = cv2.VideoCapture("gazeclassify/example_data/frame.mp4")
        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(capture.get(cv2.CAP_PROP_FPS))
        has_frames, frame = capture.read()
        capture.release()
        codec = cv2.VideoWriter_fourcc(*'DIVX')
        writer = cv2.VideoWriter("gazeclassify/example_data/frame_export.mp4", codec, fps, (width, height))
        writer.write(frame)
        writer.release()
| 2,599 | 77 | 179 |
dcabc35ace7762a3ac7e2768db9464f713687e1e | 2,656 | py | Python | account/models.py | Emmastro/africanlibraries | 6755dd5a7d3453c7ba6e63d49071f9f5af280f71 | [
"Apache-2.0"
] | null | null | null | account/models.py | Emmastro/africanlibraries | 6755dd5a7d3453c7ba6e63d49071f9f5af280f71 | [
"Apache-2.0"
] | null | null | null | account/models.py | Emmastro/africanlibraries | 6755dd5a7d3453c7ba6e63d49071f9f5af280f71 | [
"Apache-2.0"
] | null | null | null | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
class Reader(User):
"""
A reader is any school member that can have access to the library for
reading purpose
"""
# Reader's school
school = models.ForeignKey('schools.School',
on_delete=models.CASCADE)
#Get the reader image for a better interaction with other readers
image = models.ImageField(upload_to="Reader_Profile",
verbose_name="Profil Image", null=True, blank=True)
#Save all the reader's reading for creating an history and customize the book suggestions
reading = models.ManyToManyField('schools.Reading',
blank=True)
book_registered = models.ManyToManyField('schools.Book', blank=True)
# Save the reader preference of a customized reading suggestions
category_preference = models.ManyToManyField('schools.Category', blank=True)
author_preference = models.ManyToManyField('account.Author', blank=True)
def image_url(self):
"""
"""
if self.image and hasattr(self.image, "url"):
return self.image.url
class Meta(object):
"""docstring for Meta"""
verbose_name = 'Reader'
#verbose_name_plural = _('Reader')
class Author(User):
"""
Author for an eBook on the website (for online reading/download or sale)
"""
#Get the reader image for a better interaction with other readers
image = models.ImageField(upload_to="Reader_Profile",
verbose_name="Profil Image", null=True, blank=True)
bio = models.TextField(null=True)
class Meta(object):
"""docstring for Meta"""
verbose_name = 'Author'
class Administrator(User):
""" Admin Account for the school library.
Affectations: - Register the school
- Can Register all students
- Access to the full school dashboard
- Pay for the service
"""
school = models.ForeignKey('schools.School',
on_delete=models.CASCADE, null=True, blank=True)
#Get the reader image for a better interaction with other readers
image = models.ImageField(upload_to="Reader_Profile",
verbose_name="Profil Image", null=True, blank=True)
# Payment methodes
creditCardNumber = models.IntegerField( null=True, blank=True)
def image_url(self):
"""
"""
if self.image and hasattr(self.image, "url"):
return self.image.url
class Meta(object):
"""docstring for Meta"""
verbose_name = 'Administrator'
#verbose_name_plural = _('Reader')
| 25.295238 | 90 | 0.725904 | from django.db import models
from django.urls import reverse
from django.contrib.auth.models import User
# NOTE(review): subclasses django.contrib.auth's User directly (multi-table
# inheritance) — confirm this is intended over a OneToOne profile model.
class Reader(User):
    """
    A reader is any school member that can have access to the library for
    reading purpose
    """
    # Reader's school
    school = models.ForeignKey('schools.School',
        on_delete=models.CASCADE)
    #Get the reader image for a better interaction with other readers
    image = models.ImageField(upload_to="Reader_Profile",
        verbose_name="Profil Image", null=True, blank=True)
    #Save all the reader's reading for creating an history and customize the book suggestions
    reading = models.ManyToManyField('schools.Reading',
        blank=True)
    book_registered = models.ManyToManyField('schools.Book', blank=True)
    # Save the reader preference of a customized reading suggestions
    category_preference = models.ManyToManyField('schools.Category', blank=True)
    author_preference = models.ManyToManyField('account.Author', blank=True)
    def image_url(self):
        """Return the profile image URL, or ``None`` when no image is set.
        """
        if self.image and hasattr(self.image, "url"):
            return self.image.url
    def get_absolute_url(self):
        """Canonical URL of this reader's account page."""
        return reverse('accounts', kwargs={'pk': self.pk})
    class Meta(object):
        """Model metadata: human-readable name."""
        verbose_name = 'Reader'
        #verbose_name_plural = _('Reader')
class Author(User):
    """
    Author for an eBook on the website (for online reading/download or sale)
    """
    #Get the reader image for a better interaction with other readers
    image = models.ImageField(upload_to="Reader_Profile",
        verbose_name="Profil Image", null=True, blank=True)
    # Short biography shown on the author's page.
    bio = models.TextField(null=True)
    def image_url(self):
        """Return the profile image URL, or ``None`` when no image is set."""
        if self.image and hasattr(self.image, "url"):
            return self.image.url
    class Meta(object):
        """Model metadata: human-readable name."""
        verbose_name = 'Author'
class Administrator(User):
    """ Admin Account for the school library.
    Affectations: - Register the school
    - Can Register all students
    - Access to the full school dashboard
    - Pay for the service
    """
    school = models.ForeignKey('schools.School',
        on_delete=models.CASCADE, null=True, blank=True)
    #Get the reader image for a better interaction with other readers
    image = models.ImageField(upload_to="Reader_Profile",
        verbose_name="Profil Image", null=True, blank=True)
    # Payment methodes
    # NOTE(review): storing a raw credit card number in a plain IntegerField
    # is a security/compliance risk (and drops leading zeros) — this should
    # go through a payment provider token instead.
    creditCardNumber = models.IntegerField( null=True, blank=True)
    def image_url(self):
        """Return the profile image URL, or ``None`` when no image is set.
        """
        if self.image and hasattr(self.image, "url"):
            return self.image.url
    def get_absolute_url(self):
        """Canonical URL of this administrator's account page."""
        return reverse('accounts', kwargs={'pk': self.pk})
    class Meta(object):
        """Model metadata: human-readable name."""
        verbose_name = 'Administrator'
        #verbose_name_plural = _('Reader')
| 195 | 0 | 72 |
6b12411d483da832edf7bcaa1cfa3bafb65bbb40 | 9,403 | py | Python | tekore/_auth/expiring/client.py | Allerter/tekore | 20cf68280fb5b691126600a5b474ee841f7be199 | [
"MIT"
] | 135 | 2020-01-14T17:47:26.000Z | 2022-03-25T18:30:04.000Z | tekore/_auth/expiring/client.py | Allerter/tekore | 20cf68280fb5b691126600a5b474ee841f7be199 | [
"MIT"
] | 135 | 2020-01-13T22:56:35.000Z | 2022-03-11T19:41:36.000Z | tekore/_auth/expiring/client.py | Allerter/tekore | 20cf68280fb5b691126600a5b474ee841f7be199 | [
"MIT"
] | 21 | 2020-01-16T16:01:23.000Z | 2022-02-17T12:46:32.000Z | from base64 import b64encode as _b64encode
from typing import Tuple
from hashlib import sha256
from secrets import token_urlsafe
from urllib.parse import urlencode
from .decor import parse_token, parse_refreshed_token
from .token import Token
from ..scope import Scope
from ..._sender import Sender, Client, send_and_process, Request
OAUTH_AUTHORIZE_URL = 'https://accounts.spotify.com/authorize'
OAUTH_TOKEN_URL = 'https://accounts.spotify.com/api/token'
def b64encode(msg: str) -> str:
"""Encode a unicode string in base-64."""
return _b64encode(msg.encode()).decode()
def b64urlencode(msg: bytes) -> str:
"""Encode bytes in url-safe base-64 alphabet."""
encoded = _b64encode(msg).decode()
stripped = encoded.split("=")[0]
return stripped.replace("+", "-").replace("/", "_")
class Credentials(Client):
"""
Client for retrieving access tokens.
Parameters
----------
client_id
client id
client_secret
client secret, not required for PKCE user authorisation
redirect_uri
whitelisted redirect URI, required for user authorisation
sender
request sender
asynchronous
synchronicity requirement
"""
@send_and_process(parse_token(uses_pkce=False))
def request_client_token(self) -> Token:
"""
Request a client token.
Returns
-------
Token
client access token
"""
payload = {'grant_type': 'client_credentials'}
return self._token_request(payload, auth=True), ()
def user_authorisation_url(
self,
scope=None,
state: str = None,
show_dialog: bool = False
) -> str:
"""
Construct an authorisation URL.
Step 1/2 in authorisation code flow.
User should be redirected to the resulting URL for authorisation.
Step 2/2: :meth:`request_user_token`.
Parameters
----------
scope
token privileges, accepts a :class:`Scope`, a single :class:`scope`,
a list of :class:`scopes <scope>` and strings for :class:`Scope`,
or a space-separated list of scopes as a string
state
additional state
show_dialog
force login dialog even if previously authorised
Returns
-------
str
login URL
"""
payload = self._user_auth_payload(scope, state)
payload['show_dialog'] = str(show_dialog).lower()
return OAUTH_AUTHORIZE_URL + '?' + urlencode(payload)
@send_and_process(parse_token(uses_pkce=False))
def request_user_token(self, code: str) -> Token:
"""
Request a new user token.
Step 2/2 in authorisation code flow.
Code is provided as a URL parameter in the redirect URI
after login in step 1: :meth:`user_authorisation_url`.
Parameters
----------
code
code from redirect parameters
Returns
-------
Token
user access token
"""
payload = {
'code': code,
'redirect_uri': self.redirect_uri,
'grant_type': 'authorization_code'
}
return self._token_request(payload, auth=True), ()
@send_and_process(parse_refreshed_token(uses_pkce=False))
def refresh_user_token(self, refresh_token: str) -> Token:
"""
Request a refreshed user token.
Parameters
----------
refresh_token
refresh token
Returns
-------
Token
refreshed user access token
"""
payload = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token'
}
return self._token_request(payload, auth=True), (refresh_token,)
def pkce_user_authorisation(
self,
scope=None,
state: str = None,
verifier_bytes: int = 32,
) -> Tuple[str, str]:
"""
Construct authorisation URL and verifier.
Step 1/2 in authorisation code flow with proof key for code exchange.
The user should be redirected to the resulting URL for authorisation.
The verifier is passed to :meth:`request_pkce_token` in step 2.
Parameters
----------
scope
token privileges, accepts a :class:`Scope`, a single :class:`scope`,
a list of :class:`scopes <scope>` and strings for :class:`Scope`,
or a space-separated list of scopes as a string
state
additional state
verifier_bytes
number of bytes to generate PKCE verifier with, ``32 <= bytes <= 96``.
The specified range of bytes generates the appropriate number of
characters (43 - 128) after base-64 encoding, as required in RFC 7636.
Returns
-------
Tuple[str, str]
authorisation URL and PKCE code verifier
"""
assert 32 <= verifier_bytes <= 96, 'Invalid number of verifier bytes!'
verifier = token_urlsafe(verifier_bytes)
sha = sha256(verifier.encode())
challenge = b64urlencode(sha.digest())
payload = self._user_auth_payload(scope, state)
payload['code_challenge'] = challenge
payload['code_challenge_method'] = 'S256'
auth_url = OAUTH_AUTHORIZE_URL + '?' + urlencode(payload)
return auth_url, verifier
@send_and_process(parse_token(uses_pkce=True))
def request_pkce_token(self, code: str, verifier: str) -> Token:
"""
Request a new PKCE user token.
Step 2/2 in authorisation code flow with proof key for code exchange.
Code is provided as a URL parameter in the redirect URI
after login in step 1: :meth:`pkce_user_authorisation`.
Parameters
----------
code
code from redirect parameters
verifier
PKCE code verifier generated for authorisation URL
Returns
-------
Token
user access token
"""
payload = {
'client_id': self.client_id,
'code': code,
'code_verifier': verifier,
'grant_type': 'authorization_code',
'redirect_uri': self.redirect_uri,
}
return self._token_request(payload, auth=False), ()
@send_and_process(parse_refreshed_token(uses_pkce=True))
def refresh_pkce_token(self, refresh_token: str) -> Token:
"""
Request a refreshed PKCE user token.
Parameters
----------
refresh_token
refresh token
Returns
-------
Token
refreshed user access token
"""
payload = {
'client_id': self.client_id,
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
}
return self._token_request(payload, auth=False), (refresh_token,)
def refresh(self, token: Token) -> Token:
"""
Refresh an access token.
Both client and user tokens are accepted and refreshed.
The correct refreshing method is applied regardless if PKCE was used or not.
For client tokens, a new token is returned.
For user tokens, a refreshed token is returned.
Parameters
----------
token
token to be refreshed
Returns
-------
Token
refreshed access token
"""
if token.refresh_token is None:
return self.request_client_token()
elif token.uses_pkce:
return self.refresh_pkce_token(token.refresh_token)
else:
return self.refresh_user_token(token.refresh_token)
| 30.332258 | 84 | 0.584175 | from base64 import b64encode as _b64encode
from typing import Tuple
from hashlib import sha256
from secrets import token_urlsafe
from urllib.parse import urlencode
from .decor import parse_token, parse_refreshed_token
from .token import Token
from ..scope import Scope
from ..._sender import Sender, Client, send_and_process, Request
OAUTH_AUTHORIZE_URL = 'https://accounts.spotify.com/authorize'
OAUTH_TOKEN_URL = 'https://accounts.spotify.com/api/token'
def b64encode(msg: str) -> str:
    """Return *msg* base-64 encoded, as a unicode string."""
    raw = msg.encode()
    encoded = _b64encode(raw)
    return encoded.decode()
def b64urlencode(msg: bytes) -> str:
    """Encode *msg* with the URL-safe base-64 alphabet, without padding."""
    standard = _b64encode(msg).decode()
    unpadded = standard.rstrip("=")
    return unpadded.replace("+", "-").replace("/", "_")
class Credentials(Client):
    """
    Client for retrieving access tokens.
    Parameters
    ----------
    client_id
        client id
    client_secret
        client secret, not required for PKCE user authorisation
    redirect_uri
        whitelisted redirect URI, required for user authorisation
    sender
        request sender
    asynchronous
        synchronicity requirement
    """
    def __init__(
        self,
        client_id: str,
        client_secret: str = None,
        redirect_uri: str = None,
        sender: Sender = None,
        asynchronous: bool = None,
    ):
        super().__init__(sender, asynchronous)
        self.client_id = client_id
        self.client_secret = client_secret
        self.redirect_uri = redirect_uri
    def __repr__(self):
        # NOTE: the repr includes the client secret verbatim — avoid logging it.
        options = [
            f'client_id={self.client_id!r}',
            f'client_secret={self.client_secret!r}',
            f'redirect_uri={self.redirect_uri!r}',
            f'sender={self.sender!r}',
        ]
        return type(self).__name__ + '(' + ', '.join(options) + ')'
    def _token_request(self, payload: dict, auth: bool) -> Request:
        # Build the POST to the token endpoint.  When ``auth`` is set, the
        # request carries an HTTP Basic header built from
        # base64("client_id:client_secret"), which requires a client secret.
        if auth:
            if self.client_secret is None:
                raise ValueError(
                    f'A client secret is required! Got `{self.client_secret}`.'
                )
            token = b64encode(self.client_id + ':' + self.client_secret)
            headers = {'Authorization': f'Basic {token}'}
        else:
            headers = None
        return Request('POST', OAUTH_TOKEN_URL, data=payload, headers=headers)
    @send_and_process(parse_token(uses_pkce=False))
    def request_client_token(self) -> Token:
        """
        Request a client token.
        Returns
        -------
        Token
            client access token
        """
        payload = {'grant_type': 'client_credentials'}
        # The trailing tuple presumably carries extra arguments for the token
        # parser installed by ``send_and_process`` — see tekore._sender.
        return self._token_request(payload, auth=True), ()
    def _user_auth_payload(self, scope, state):
        # Query parameters shared by plain and PKCE authorisation URLs.
        payload = {
            'client_id': self.client_id,
            'redirect_uri': self.redirect_uri,
            'response_type': 'code',
        }
        if isinstance(scope, list):
            scope = Scope(*scope)
        if scope is not None:
            payload['scope'] = str(scope)
        if state is not None:
            payload['state'] = state
        return payload
    def user_authorisation_url(
        self,
        scope=None,
        state: str = None,
        show_dialog: bool = False
    ) -> str:
        """
        Construct an authorisation URL.
        Step 1/2 in authorisation code flow.
        User should be redirected to the resulting URL for authorisation.
        Step 2/2: :meth:`request_user_token`.
        Parameters
        ----------
        scope
            token privileges, accepts a :class:`Scope`, a single :class:`scope`,
            a list of :class:`scopes <scope>` and strings for :class:`Scope`,
            or a space-separated list of scopes as a string
        state
            additional state
        show_dialog
            force login dialog even if previously authorised
        Returns
        -------
        str
            login URL
        """
        payload = self._user_auth_payload(scope, state)
        payload['show_dialog'] = str(show_dialog).lower()
        return OAUTH_AUTHORIZE_URL + '?' + urlencode(payload)
    @send_and_process(parse_token(uses_pkce=False))
    def request_user_token(self, code: str) -> Token:
        """
        Request a new user token.
        Step 2/2 in authorisation code flow.
        Code is provided as a URL parameter in the redirect URI
        after login in step 1: :meth:`user_authorisation_url`.
        Parameters
        ----------
        code
            code from redirect parameters
        Returns
        -------
        Token
            user access token
        """
        payload = {
            'code': code,
            'redirect_uri': self.redirect_uri,
            'grant_type': 'authorization_code'
        }
        return self._token_request(payload, auth=True), ()
    @send_and_process(parse_refreshed_token(uses_pkce=False))
    def refresh_user_token(self, refresh_token: str) -> Token:
        """
        Request a refreshed user token.
        Parameters
        ----------
        refresh_token
            refresh token
        Returns
        -------
        Token
            refreshed user access token
        """
        payload = {
            'refresh_token': refresh_token,
            'grant_type': 'refresh_token'
        }
        return self._token_request(payload, auth=True), (refresh_token,)
    def pkce_user_authorisation(
        self,
        scope=None,
        state: str = None,
        verifier_bytes: int = 32,
    ) -> Tuple[str, str]:
        """
        Construct authorisation URL and verifier.
        Step 1/2 in authorisation code flow with proof key for code exchange.
        The user should be redirected to the resulting URL for authorisation.
        The verifier is passed to :meth:`request_pkce_token` in step 2.
        Parameters
        ----------
        scope
            token privileges, accepts a :class:`Scope`, a single :class:`scope`,
            a list of :class:`scopes <scope>` and strings for :class:`Scope`,
            or a space-separated list of scopes as a string
        state
            additional state
        verifier_bytes
            number of bytes to generate PKCE verifier with, ``32 <= bytes <= 96``.
            The specified range of bytes generates the appropriate number of
            characters (43 - 128) after base-64 encoding, as required in RFC 7636.
        Returns
        -------
        Tuple[str, str]
            authorisation URL and PKCE code verifier
        """
        assert 32 <= verifier_bytes <= 96, 'Invalid number of verifier bytes!'
        verifier = token_urlsafe(verifier_bytes)
        # S256 challenge: BASE64URL(SHA256(verifier)), per RFC 7636 section 4.2.
        sha = sha256(verifier.encode())
        challenge = b64urlencode(sha.digest())
        payload = self._user_auth_payload(scope, state)
        payload['code_challenge'] = challenge
        payload['code_challenge_method'] = 'S256'
        auth_url = OAUTH_AUTHORIZE_URL + '?' + urlencode(payload)
        return auth_url, verifier
    @send_and_process(parse_token(uses_pkce=True))
    def request_pkce_token(self, code: str, verifier: str) -> Token:
        """
        Request a new PKCE user token.
        Step 2/2 in authorisation code flow with proof key for code exchange.
        Code is provided as a URL parameter in the redirect URI
        after login in step 1: :meth:`pkce_user_authorisation`.
        Parameters
        ----------
        code
            code from redirect parameters
        verifier
            PKCE code verifier generated for authorisation URL
        Returns
        -------
        Token
            user access token
        """
        # PKCE requests are public-client requests: no Basic auth header.
        payload = {
            'client_id': self.client_id,
            'code': code,
            'code_verifier': verifier,
            'grant_type': 'authorization_code',
            'redirect_uri': self.redirect_uri,
        }
        return self._token_request(payload, auth=False), ()
    @send_and_process(parse_refreshed_token(uses_pkce=True))
    def refresh_pkce_token(self, refresh_token: str) -> Token:
        """
        Request a refreshed PKCE user token.
        Parameters
        ----------
        refresh_token
            refresh token
        Returns
        -------
        Token
            refreshed user access token
        """
        payload = {
            'client_id': self.client_id,
            'grant_type': 'refresh_token',
            'refresh_token': refresh_token,
        }
        return self._token_request(payload, auth=False), (refresh_token,)
    def refresh(self, token: Token) -> Token:
        """
        Refresh an access token.
        Both client and user tokens are accepted and refreshed.
        The correct refreshing method is applied regardless if PKCE was used or not.
        For client tokens, a new token is returned.
        For user tokens, a refreshed token is returned.
        Parameters
        ----------
        token
            token to be refreshed
        Returns
        -------
        Token
            refreshed access token
        """
        # Client tokens carry no refresh token, so a brand-new one is requested;
        # user tokens dispatch on whether they were obtained via PKCE.
        if token.refresh_token is None:
            return self.request_client_token()
        elif token.uses_pkce:
            return self.refresh_pkce_token(token.refresh_token)
        else:
            return self.refresh_user_token(token.refresh_token)
| 1,517 | 0 | 108 |
0587045a70a089d0e2fb0c2508b2623eba5fd3c0 | 3,700 | py | Python | automow_maps/scripts/field_publisher.py | Auburn-Automow/au_automow_common | 920be6a740aa6d738e9954417b41490e353efd04 | [
"BSD-3-Clause"
] | 43 | 2016-03-05T17:06:29.000Z | 2022-03-10T08:50:46.000Z | automow_maps/scripts/field_publisher.py | qintxwd/au_automow_common | 920be6a740aa6d738e9954417b41490e353efd04 | [
"BSD-3-Clause"
] | 2 | 2017-07-10T12:43:49.000Z | 2019-03-13T13:57:31.000Z | automow_maps/scripts/field_publisher.py | qintxwd/au_automow_common | 920be6a740aa6d738e9954417b41490e353efd04 | [
"BSD-3-Clause"
] | 22 | 2016-03-23T06:10:52.000Z | 2022-03-10T08:50:49.000Z | #!/usr/bin/env python
"""
This ROS node takes the field survey file and publishes a
field polygon as a geometry_msgs/PolygonStamped for use in
other nodes and for visualization in rviz.
"""
import roslib; roslib.load_manifest('automow_maps')
import rospy
from geometry_msgs.msg import PolygonStamped, Point32, Polygon
class FieldPublisherNode(object):
    """
    This is a ROS node that is responsible for publishing the field
    (the surveyed mowing area) for use by other nodes and for
    visualization in rviz.
    """
# Entry point: construct the node when this file is run as a script.
if __name__ == '__main__':
    fpn = FieldPublisherNode()
| 37 | 85 | 0.605405 | #!/usr/bin/env python
"""
This ROS node takes the field survey file and publishes a
field polygon as a geometry_msgs/PolygonStamped for use in
other nodes and for visualization in rviz.
"""
import roslib; roslib.load_manifest('automow_maps')
import rospy
from geometry_msgs.msg import PolygonStamped, Point32, Polygon
class FieldPublisherNode(object):
    """
    This is a ROS node that is responsible for publishing the field.

    Reads the surveyed field polygon from the ROS parameter server,
    derives a safety buffer and a cut area from it, and publishes all
    three as latched PolygonStamped messages.
    """
    def __init__(self):
        """Set up the node, publish the field polygons once, and spin."""
        # Setup ROS node
        rospy.init_node('field_publisher')
        # Get ROS parameters
        self.field_polygon = rospy.get_param("~field_polygon")
        self.field_frame_id = rospy.get_param("~field_frame_id", "odom")
        # Setup publishers and subscribers (latched: late subscribers
        # still receive the last published message)
        safety_pub = rospy.Publisher('/field/safety', PolygonStamped, latch=True)
        boundry_pub = rospy.Publisher('/field/boundry', PolygonStamped, latch=True)
        cut_area_pub = rospy.Publisher('/field/cut_area', PolygonStamped, latch=True)
        # Read the field in
        if self.read_field_file():
            # Publish the msg once, it is latched so no need to repeat
            safety_pub.publish(self.safety_msg)
            boundry_pub.publish(self.boundry_msg)
            cut_area_pub.publish(self.cut_area_msg)
        # Spin (blocks until the node is shut down)
        rospy.spin()
    def read_field_file(self):
        """Build the boundary, safety, and cut-area polygon messages.

        Populates self.safety_msg, self.boundry_msg, and self.cut_area_msg
        from the ~field_polygon parameter.

        Returns:
            True once the messages have been populated.
        """
        # Setup msgs
        self.safety_msg = PolygonStamped()
        self.boundry_msg = PolygonStamped()
        self.cut_area_msg = PolygonStamped()
        self.safety_msg.header.stamp = rospy.Time.now()
        self.safety_msg.header.frame_id = self.field_frame_id
        # all three messages share one header (same frame and stamp)
        self.boundry_msg.header = self.safety_msg.header
        self.cut_area_msg.header = self.safety_msg.header
        # Parse out the points
        polygon_points = []
        polygon_points32 = []
        point_count = 0
        for point in self.field_polygon:
            point_count += 1
            # fix_type below 3 indicates a low-quality GPS fix for the survey point
            if point['fix_type'] < 3:
                rospy.logwarn('Point %i has a low quality fix type of %i'
                              % (point_count, point['fix_type']))
            (easting, northing) = (point['easting'], point['northing'])
            polygon_points.append((float(easting), float(northing)))
            polygon_points32.append(Point32(float(easting), float(northing), 0))
        # Put the points into the boundry_msg
        self.boundry_msg.polygon = Polygon(polygon_points32)
        # Expand and contract the field shape for safety buffer and cut area
        safety_points = self.offset_polygon(polygon_points, 2)
        cut_area_points = self.offset_polygon(polygon_points, -0.5)
        self.safety_msg.polygon = Polygon(safety_points)
        self.cut_area_msg.polygon = Polygon(cut_area_points)
        return True
    def offset_polygon(self, points, offset):
        """Offset a closed polygon by a fixed distance.

        Parameters:
            points: sequence of (x, y) vertex tuples
            offset: offset distance; sign selects direction
                (NOTE(review): the effective direction depends on the
                vertex winding of the surveyed polygon -- confirm)

        Returns:
            list of offset vertices as geometry_msgs Point32
        """
        import polygon_offset
        from polygon_offset import getinsetpoint
        temp_points = []
        # polygon_offset reads its offset distance from a module-level global
        polygon_offset.OFFSET = -offset
        # inset each vertex from its two neighbours; the last two calls
        # wrap around to close the polygon
        for i in range(len(points)-2):
            temp_points.append(getinsetpoint(points[i],
                                             points[i+1],
                                             points[i+2]))
        temp_points.append(getinsetpoint(points[-2],
                                         points[-1],
                                         points[0]))
        temp_points.append(getinsetpoint(points[-1],
                                         points[0],
                                         points[1]))
        result = []
        for point in temp_points:
            result.append(Point32(point[0], point[1], 0))
        return result
# Entry point: constructing the node publishes the field and blocks in spin().
if __name__ == '__main__':
    fpn = FieldPublisherNode()
| 3,111 | 0 | 80 |
5ddf20fef29c8012101e7fb1a4922bd6af50aece | 26,502 | py | Python | smlb/core/metrics.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 6 | 2020-07-27T21:08:55.000Z | 2021-05-04T07:00:29.000Z | smlb/core/metrics.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 18 | 2020-09-01T00:47:04.000Z | 2021-09-15T22:16:56.000Z | smlb/core/metrics.py | CitrineInformatics/smlb | 28a3689bd36aa8d51031b4faf7e2331bbd8148a9 | [
"Apache-2.0"
] | 2 | 2020-08-24T21:50:16.000Z | 2020-12-06T05:18:57.000Z | """Evaluation metrics.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
(c) Matthias Rupp 2019, Citrine Informatics.
Related terms: objective functions, loss functions, cost functions,
reward functions, utility functions, fitness functions, score functions, merit functions.
Provides classes EvaluationMetric, ScalarEvaluationMetric, VectorEvaluationMetric.
See documentation for relationships and derived metrics.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from warnings import warn
import numpy as np
import scipy as sp
import scipy.stats # for normal distribution. Python 3.8 will offer a 'statistics' module including PDF and CDF of the normal distribution
from smlb import InvalidParameterError
from smlb import SmlbObject
from smlb import params
##################
# Base classes #
##################
class EvaluationMetric(SmlbObject, metaclass=ABCMeta):
    """Abstract base class of all evaluation metrics.

    Common ancestor of ScalarEvaluationMetric and VectorEvaluationMetric.

    Design notes:
    * Concrete metrics implement _evaluate(); clients call evaluate(),
      which may post-process the raw result (for example, flipping the
      sign of a ScalarEvaluationMetric to match a requested orientation).
    * Routing all calls through evaluate() guarantees that such
      post-processing is never skipped by a subclass implementation,
      although it cannot prevent a subclass from accidentally overriding
      evaluate() itself instead of _evaluate().
    * Metrics receive only the observed ('true') labels of the validation
      set, never the training labels: performance on a validation set V
      must be a function of V alone, so that it cannot change without a
      change in V.
    """

    # An alternative design with only evaluate(), where each implementation
    # ends in `return self.processingf(result)`, was tried and abandoned:
    # for inheritance chains EvaluationMetric -> A -> B it would require an
    # additional 'raw' flag (or more complicated machinery) to control when
    # the result may be modified.

    @abstractmethod
    def _evaluate(self, true, pred):
        """Compute the metric for given observations and predictions.

        Same signature as evaluate(), which see. Subclasses override this
        method rather than evaluate() so that the EvaluationMetric base
        class can post-process results.
        """
        raise NotImplementedError

    def evaluate(self, true, pred):
        """Compute the metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predictive property distributions (PredictiveDistribution)

        Returns:
            metric value; its type is metric-specific, for example a scalar
            (ScalarEvaluationMetric) or a vector (VectorEvaluationMetric)

        Both arguments are distributions; this includes delta distributions
        (deterministic values) and normal distributions. Every metric should
        support at least all combinations of those two for true and pred.
        """
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        """Alias for evaluate(), making metric objects callable."""
        return self.evaluate(*args, **kwargs)
class ScalarEvaluationMetric(EvaluationMetric):
    """Base class for EvaluationMetrics that yield a single scalar."""

    def __init__(self, orient=None, **kwargs):
        """Initialize state.

        Parameters:
            orient: actively orients the metric towards minimization (-1)
                or maximization (+1); if None, the metric's natural
                orientation is kept as-is

        Raises:
            InvalidParameterError if an orientation is requested for a
            metric that has no natural orientation
        """
        super().__init__(**kwargs)

        orient = params.enumeration(orient, {-1, +1, None})

        # a sign of +1 leaves _evaluate() results unchanged
        self._sign = +1
        if orient is not None:
            if not self.has_orientation:
                raise InvalidParameterError("oriented metric", self.orientation)
            # +1 when requested and natural orientation agree, -1 otherwise
            self._sign = orient * self.orientation

    @property
    def has_orientation(self):
        """Whether the metric has a preferred direction.

        An oriented metric is ordered, with either more negative or more
        positive values indicating improvement.

        Returns:
            True if the metric is oriented, False otherwise
        """
        return self.orientation != 0

    @property
    def orientation(self):
        """Direction of improvement for this metric, if any.

        Examples without orientation include signed residuals and composite
        metrics. The orientation of an object must never change over its
        lifetime.

        Returns:
            -1 for minimization, +1 for maximization, 0 if not applicable
        """
        # non-oriented by default; subclasses override to declare a direction
        return 0

    def evaluate(self, true, pred):
        """Compute the metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predictive property distributions (PredictiveDistribution)

        Returns:
            a scalar value, sign-adjusted to the orientation chosen at
            initialization

        Both arguments are distributions, including delta distributions
        (deterministic values) and normal distributions.
        """
        return self._sign * self._evaluate(true, pred)
# todo: introduce a 'summaryf' parameter to enable mean, min, max, ... of vector-valued evaluation metrics
class VectorEvaluationMetric(EvaluationMetric):
    """Base class for EvaluationMetrics that yield a vector."""

    def evaluate(self, true, pred):
        """Compute the metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predicted property distributions (PredictiveDistribution)

        Returns:
            a vector of metric values

        Both arguments are distributions, including delta distributions
        (deterministic values) and normal distributions.
        """
        # vector metrics apply no post-processing
        return self._evaluate(true, pred)
######################
# Error statistics #
######################
class Residuals(VectorEvaluationMetric):
    r"""Signed prediction errors (residuals).

    Residuals $f(x_i) - y_i$ for inputs $x_i$, learner $f$, and
    observed values $y_i$.
    """

    def _evaluate(self, true, pred):
        """Compute signed residuals, predicted minus observed.

        Parameters:
            true: observed property distribution; only means are used
            pred: predictive property distribution; only means are used

        Returns:
            residuals as a NumPy array
        """
        observed = params.distribution(true).mean
        predicted = params.distribution(pred).mean
        return predicted - observed
class AbsoluteResiduals(Residuals):
    """Unsigned (absolute) prediction errors.

    Absolute residuals $|f(x_i) - y_i|$ for inputs $x_i$, learner $f$,
    and observed values $y_i$.
    """

    def _evaluate(self, true, pred):
        """Compute unsigned residuals, | predicted - observed |.

        Parameters:
            true: observed property distribution; only means are used
            pred: predictive property distribution; only means are used

        Returns:
            unsigned residuals as a NumPy array
        """
        signed = super()._evaluate(true, pred)
        return np.abs(signed)
class SquaredResiduals(Residuals):
    """Squared prediction errors.

    Same as Residuals, but element-wise squared.
    """

    def _evaluate(self, true, pred):
        """Compute squared residuals, ( predicted - observed )^2.

        Parameters:
            true: observed property distribution; only means are used
            pred: predictive property distribution; only means are used

        Returns:
            squared residuals as a NumPy array
        """
        signed = super()._evaluate(true, pred)
        return np.square(signed)
class MeanAbsoluteError(ScalarEvaluationMetric):
    """Mean Absolute Error (MAE)."""

    @property
    def orientation(self):
        """Smaller is better: minimization."""
        return -1

    def _evaluate(self, true, pred):
        r"""Compute the Mean Absolute Error (MAE).

        \[ \text{MAE} = \frac{1}{n} \sum_{i=1}^n | f(x_i) - y_i | \]

        Parameters:
            true: observed property distribution; only means are used
            pred: predictive property distribution; only means are used

        Returns:
            mean absolute error as a floating point number
        """
        abs_residuals = AbsoluteResiduals()._evaluate(true, pred)
        return float(np.mean(abs_residuals))
class MeanSquaredError(ScalarEvaluationMetric):
    """Mean Squared Error (MSE)."""

    @property
    def orientation(self):
        """Smaller is better: minimization."""
        return -1

    def _evaluate(self, true, pred):
        r"""Compute the Mean Squared Error (MSE).

        \[ \text{MSE} = \frac{1}{n} \sum_{i=1}^n ( f(x_i) - y_i )^2 \]

        Parameters:
            true: observed property distribution; only means are used
            pred: predictive property distribution; only means are used

        Returns:
            mean squared error as a floating point number
        """
        sq_residuals = SquaredResiduals()._evaluate(true, pred)
        return float(np.mean(sq_residuals))
class RootMeanSquaredError(MeanSquaredError):
    """Root Mean Squared Error (RMSE)."""

    # orientation (-1, minimization) is inherited from MeanSquaredError

    def _evaluate(self, true, pred):
        r"""Compute the Root Mean Squared Error (RMSE).

        \[ \text{RMSE} = \sqrt{ \frac{1}{n} \sum_{i=1}^n ( f(x_i) - y_i )^2 } \]

        Parameters:
            true: observed property distribution; only means are used
            pred: predictive property distribution; only means are used

        Returns:
            root mean squared error as a floating point number
        """
        mse = super()._evaluate(true, pred)
        return float(np.sqrt(mse))
class StandardizedRootMeanSquaredError(RootMeanSquaredError):
    r"""Standardized Root Mean Squared Error (stdRMSE).

    The standardized RMSE (stdRMSE), also called relative RMSE or
    non-dimensional model error (NDME), is

        stdRMSE = RMSE / std. dev.,  where

    \[ \text{std. dev.} = \sqrt{ \frac{1}{n} \sum_{i=1}^n ( y_i - \bar{y} )^2 } \]

    and $\bar{y} = \frac{1}{n} \sum_{i=1}^n y_i$.

    The denominator can be read as the RMSE of a model that predicts the
    mean of the validation set (!) labels, making stdRMSE a unit-less
    quantity that typically lies between 0 (perfect model) and 1
    (guess-the-mean performance). If training and validation label
    distributions differ (IID violation), stdRMSE can be arbitrarily high;
    if the IID assumption holds, stdRMSE allows comparing prediction errors
    across datasets on the same scale (datasets can still differ in how
    hard they are to learn).

    "Standardized RMSE" is preferred over "non-dimensional model error"
    as the more specific term, directly related to statistical usage
    ("standard score"). Compared to RMSE divided by label range, stdRMSE is
    less statistically volatile (min and max are extremal statistics with
    high variance).

    By default the standard deviation estimator uses no bias correction;
    see __init__ for alternatives.
    """

    def __init__(self, bias_correction: float = 0, **kwargs):
        """Initialize metric.

        Parameters:
            bias_correction: divisor correction d; the standard deviation
                divides by n-d. The default 0 applies no correction.
                Bessel's correction (d=1) is unbiased for variance
                estimators, but not for standard deviation estimators;
                while no single value works for all distributions, d=1.5
                is a reasonably good correction.
        """
        self._bias_correction = params.real(bias_correction, from_=0)
        super().__init__(**kwargs)

    # orientation (-1, minimization) is inherited from RootMeanSquaredError

    def _evaluate(self, true, pred):
        """Compute RMSE divided by the standard deviation of the labels.

        stdRMSE = RMSE / std. dev.; see class docstring for details.

        Parameters:
            true: observed property distribution; only means are used
            pred: predictive property distribution; only means are used

        Returns:
            standardized root mean squared error as a floating point number
        """
        true = params.distribution(true)

        # a standard deviation requires at least two samples
        n = len(true.mean)
        if n <= 1:
            raise InvalidParameterError(
                "enough samples to compute standard deviation", f"{n} samples"
            )

        rmse = super()._evaluate(true, pred)
        stddev = np.std(true.mean, ddof=self._bias_correction)

        # guard against division by (near-)zero label spread
        if stddev <= 1e-3:  # hard-coded, could be initialization parameter
            raise InvalidParameterError(
                "sufficient label variance for non-zero standard deviation",
                f"standard deviation of {stddev}",
            )

        return float(rmse / stddev)
############################
# Uncertainty statistics #
############################
class LogPredictiveDensity(VectorEvaluationMetric):
    r"""Logarithmized Predictive Density (LPD)."""

    def _evaluate(self, true, pred):
        r"""Logarithmic Predictive Density (LPD).

        Assumes a normal predictive distribution.

        \[
        \log p (y_i = t_i | x_i)
        = - ( \log \sqrt{2\pi} + \log \sigma_i + \frac{1}{2} ( \frac{y_i - t_i}{\sigma_i} )^2 )
        \]

        See, for example,
        Joaquin Quinonero-Candela, Carl Edward Rasmussen, Fabian Sinz, Olivier Bousquet, and Bernhard Schölkopf.
        Evaluating predictive uncertainty challenge, p. 1-27, 2005. In Joaquin Quinonero-Candela, Ido Dagan,
        Bernardo Magnini, and Florence d'Alché Buc (editors), Proceedings of the First PASCAL Machine Learning
        Challenges Workshop (MLCW 2005), Southampton, United Kingdom, April 11–13, 2005.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; must be normal

        Returns:
            logarithmic predictive densities as a NumPy vector of floating point numbers
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        if np.any(pred.stddev == 0):
            # zero predicted uncertainty makes log(stddev) and the
            # standardized residual diverge.
            # Fixed: implicit string concatenation previously dropped the
            # space before "may", yielding "...Metric LogPredictiveDensitymay".
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__} may return nan.",
                RuntimeWarning,
            )

        lpd = -(
            np.log(np.sqrt(2 * np.pi))
            + np.log(pred.stddev)
            + 0.5 * np.square((true.mean - pred.mean) / pred.stddev)
        )

        return lpd
class MeanLogPredictiveDensity(ScalarEvaluationMetric):
    """Mean Logarithmized Predictive Density (MLPD)."""

    @property
    def orientation(self):
        """Larger is better: maximization."""
        return +1

    def _evaluate(self, true, pred):
        r"""Mean Logarithmic Predictive Density (MLPD).

        Arithmetic mean of LogPredictiveDensity, which see for details.
        Assumes a normal predictive distribution.

        \[
        1/n \sum_{i=1}^n \log p (y_i = t_i | x_i)
        = - ( \log \sqrt{2\pi} + \frac{1}{2n} \sum_{i=1}^n ( \log \sigma_i^2 + \frac{(y_i-t_i)^2}{\sigma_i^2} ) )
        \]

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; must be normal

        Returns:
            mean logarithmic predictive density as a floating point number
        """
        lpd = LogPredictiveDensity()._evaluate(true, pred)
        return np.mean(lpd)
class ContinuousRankedProbabilityScore(VectorEvaluationMetric):
    r"""Continuous Ranked Probability Score (CRPS).

    The Continuous Ranked Probability Score (CRPS) [1] is the squared-difference integral
    between the predicted cumulative distribution function F and that of a delta function
    on the true value:

        \int\limits_{-\infty}^{\infty} \bigl( F(u) - F_y(u) \bigr)^2 w(u) \mathrm{d} u ,

    where $F_y(u) = 0$ for $u \leq y$ and 1 otherwise, and $w$ is a weighting function.
    For normal predictive distributions, an analytic expression exists: [2]

        \sigma \Bigl( y' \bigl( 2 \Phi(y') - 1 \bigr) + 2 \phi(y') - \frac{1}{\sqrt{\pi}} \Bigr)

    where $y' = \frac{y-\mu}{\sigma}$, and, $\Phi$ and $\phi$ are cumulative and probability
    density functions of the standard normal distribution.

    [1] James E. Matheson and Robert L. Winkler. Scoring rules for continuous
        probability distributions. Management Science 22(10):1087–1096, 1976.
    [2] Tilmann Gneiting, Adrian E. Raftery, Anton H. Westveld III, Tom Goldman. Calibrated
        probabilistic forecasting using ensemble model output statistics and minimum CRPS
        estimation. Monthly Weather Review, 133(5):1098–1118, 2005.
    """

    def _evaluate(self, true, pred):
        """Evaluate continuous ranked probability score (CRPS).

        CRPS depends on the mean of the observations and, in general, the full predictive
        distribution. Currently implemented only for normal predictive distributions, for
        which a closed-form expression exists (see class docstring, reference [2]).

        For arbitrary distributions given as samples, a directly implementable expression
        is Equ. 3 in
        Eric P. Grimit, Tilmann Gneiting, Veronica J. Berrocal, Nicholas A. Johnson:
        The continuous ranked probability score for circular variables and its application
        to mesoscale forecast ensemble verification, Quarterly Journal of the Royal
        Meteorological Society 132(621C): 2925--2942, 2006. DOI 10.1256/qj.05.235

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            continuous ranked probability scores as a NumPy vector of floating point numbers
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        if np.any(pred.stddev == 0):
            # Fixed: implicit string concatenation previously dropped the
            # space before "may" in the warning message.
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__} may return nan.",
                RuntimeWarning,
            )

        strue = (true.mean - pred.mean) / pred.stddev  # re-used intermediate quantity
        crps = pred.stddev * (
            strue * (2 * sp.stats.norm.cdf(strue) - 1)
            + 2 * sp.stats.norm.pdf(strue)
            - 1 / np.sqrt(np.pi)
        )

        return crps
class MeanContinuousRankedProbabilityScore(ScalarEvaluationMetric):
    """Mean Continuous Ranked Probability Score (mCRPS)."""

    @property
    def orientation(self):
        """Smaller is better: minimization."""
        return -1

    def _evaluate(self, true, pred):
        """Return the arithmetic mean of the per-sample CRPS values."""
        crps = ContinuousRankedProbabilityScore()._evaluate(true, pred)
        return np.mean(crps)
class StandardConfidence(ScalarEvaluationMetric):
    """Fraction of residuals smaller in magnitude than the predicted standard deviation.

    Assesses the quality of predicted uncertainty estimates, both for
    individual predictions and for overall normalization. Depends only on
    the residuals, not on the predicted values themselves.

    Equivalently, this is the fraction of observations whose "normalized
    residual" (residual divided by predicted uncertainty) is less than one.
    Ideally the normalized residuals are normally distributed with std=1,
    giving a value of 0.68; there is thus no orientation, and closer to
    0.68 is better.

    Standard confidence is the observed coverage probability at the 68%
    confidence level.
    See e.g. https://www.stats.ox.ac.uk/pub/bdr/IAUL/Course1Notes5.pdf.
    """

    def _evaluate(self, true, pred):
        """Compute standard confidence.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            fraction of observations with |residual| < predicted stddev
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        covered = np.abs(true.mean - pred.mean) < pred.stddev
        return np.mean(covered)
class RootMeanSquareStandardizedResiduals(ScalarEvaluationMetric):
    """Root Mean Square of the Standardized Residuals (RMSSE).

    RMSSE evaluates the quality of the predicted uncertainty estimates, both in terms
    of individual predictions and overall normalization. Compared to standard
    confidence, RMSSE is more sensitive to outliers. Depends only on the residuals,
    not on the predicted values.

    No "orientation". Closer to 1 is better.
    """

    def _evaluate(self, true, pred):
        """Compute RMSSE.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            RMSSE, or nan if any predicted uncertainty is zero
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        if np.any(pred.stddev == 0):
            # standardized residuals are undefined for zero uncertainty.
            # Fixed: implicit string concatenation previously dropped the
            # space before "will" in the warning message.
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__} will be nan.",
                RuntimeWarning,
            )
            return np.nan

        strue = (true.mean - pred.mean) / pred.stddev
        rmsse = np.sqrt(np.mean(np.power(strue, 2)))
        return rmsse
class UncertaintyCorrelation(ScalarEvaluationMetric):
    """Pearson correlation between predicted uncertainty and |residual|.

    A positive value is desirable; a negative value indicates pathological
    behavior. Depends only on the residuals, not on the predicted values.
    """

    @property
    def orientation(self):
        """Larger is better: maximization."""
        return +1

    def _evaluate(self, true, pred):
        """Compute the uncertainty correlation.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            correlation between |residual| and predicted standard deviation
        """
        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        abs_residual = np.abs(true.mean - pred.mean)
        # off-diagonal entry of the 2x2 correlation matrix
        return np.corrcoef(abs_residual, pred.stddev)[0, 1]
# helper function
def two_sample_cumulative_distribution_function_statistic(
    sample_a, sample_b, f=lambda p, t: np.square(p - t), g=lambda s, w: np.sum(s * w)
):
    r"""Compute a statistic of the difference between two empirical cumulative distribution functions.

    Calculate statistics of the cumulative distribution functions (CDF) of two samples.
    Let $x_1,\ldots,x_d$ be the union of the two samples, $x_i < x_{i+1}$, and let
    $w_i = x_{i+1}-x_i$, $i = 1,\ldots,d-1$ be the differences between them.
    The calculated statistics have the form $g(s,w)$ where $s_i = f(F_a(x_i), F_b(x_i))$
    and $F_a$, $F_b$ are the CDFs of the two samples.

    Here, the $x_i$ are the points where one or both of the CDFs changes, $f$ is a statistic
    that depends on the value of the two CDFs, and $g$ is an arbitrary function of $s$ and $w$.
    The default choice for $g$ is Riemann integration; as the CDFs are step functions, this is
    exact and leads to statistics of the form

    \[ \int_{-\infty}^{\infty} f(F_a(x),F_b(x)) dx . \]

    Parameters:
        sample_a: first sample; a sequence of real numbers
        sample_b: second sample; a sequence of real numbers;
            can be of different length than first sample
        f: function accepting two same-length real vectors, returning a real vector of same
            length. Computes a value that depends only on the two CDFs and is thus constant
            between change points. The default is the squared difference,
            f(a,b) = np.square(a-b). The convention here is to use the left endpoint of
            the "steps".
        g: function accepting two same-length real vectors, returning a real number.
            Computes the statistic based on values of f and step "widths".
            The default, g(s,w) = np.sum(s * w), performs Riemann integration.

    Returns:
        the statistic g(s, w) as computed by the supplied functions
    """
    sample_a = params.real_vector(sample_a)
    sample_b = params.real_vector(sample_b)

    allx = np.union1d(sample_a, sample_b)  # all x where F_a and F_b change
    xdif = np.ediff1d(allx)  # width of Riemann integration bars
    allx = allx.reshape((len(allx), 1))

    # empirical CDF of each sample, evaluated at every change point
    cdfa = np.count_nonzero(np.sort(sample_a) <= allx, axis=1) / len(sample_a)
    cdfb = np.count_nonzero(np.sort(sample_b) <= allx, axis=1) / len(sample_b)

    # np.asfarray was removed in NumPy 2.0; asarray with float dtype is the
    # documented equivalent
    stat = np.asarray(f(cdfa, cdfb), dtype=float)

    return g(stat[:-1], xdif)
| 35.716981 | 152 | 0.659497 | """Evaluation metrics.
Scientific Machine Learning Benchmark:
A benchmark of regression models in chem- and materials informatics.
(c) Matthias Rupp 2019, Citrine Informatics.
Related terms: objective functions, loss functions, cost functions,
reward functions, utility functions, fitness functions, score functions, merit functions.
Provides classes EvaluationMetric, ScalarEvaluationMetric, VectorEvaluationMetric.
See documentation for relationships and derived metrics.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from warnings import warn
import numpy as np
import scipy as sp
import scipy.stats # for normal distribution. Python 3.8 will offer a 'statistics' module including PDF and CDF of the normal distribution
from smlb import InvalidParameterError
from smlb import SmlbObject
from smlb import params
##################
# Base classes #
##################
class EvaluationMetric(SmlbObject, metaclass=ABCMeta):
    """Abstract base class of all evaluation metrics.

    Common ancestor of ScalarEvaluationMetric and VectorEvaluationMetric.

    Design notes:
    * Concrete metrics implement _evaluate(); clients call evaluate(),
      which may post-process the raw result (for example, flipping the
      sign of a ScalarEvaluationMetric to match a requested orientation).
    * Routing all calls through evaluate() guarantees that such
      post-processing is never skipped by a subclass implementation,
      although it cannot prevent a subclass from accidentally overriding
      evaluate() itself instead of _evaluate().
    * Metrics receive only the observed ('true') labels of the validation
      set, never the training labels: performance on a validation set V
      must be a function of V alone, so that it cannot change without a
      change in V.
    """

    # An alternative design with only evaluate(), where each implementation
    # ends in `return self.processingf(result)`, was tried and abandoned:
    # for inheritance chains EvaluationMetric -> A -> B it would require an
    # additional 'raw' flag (or more complicated machinery) to control when
    # the result may be modified.

    @abstractmethod
    def _evaluate(self, true, pred):
        """Compute the metric for given observations and predictions.

        Same signature as evaluate(), which see. Subclasses override this
        method rather than evaluate() so that the EvaluationMetric base
        class can post-process results.
        """
        raise NotImplementedError

    def evaluate(self, true, pred):
        """Compute the metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predictive property distributions (PredictiveDistribution)

        Returns:
            metric value; its type is metric-specific, for example a scalar
            (ScalarEvaluationMetric) or a vector (VectorEvaluationMetric)

        Both arguments are distributions; this includes delta distributions
        (deterministic values) and normal distributions. Every metric should
        support at least all combinations of those two for true and pred.
        """
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        """Alias for evaluate(), making metric objects callable."""
        return self.evaluate(*args, **kwargs)
class ScalarEvaluationMetric(EvaluationMetric):
    """Base class for EvaluationMetrics that yield a single scalar."""

    def __init__(self, orient=None, **kwargs):
        """Initialize state.

        Parameters:
            orient: actively orients the metric towards minimization (-1)
                or maximization (+1); if None, the metric's natural
                orientation is kept as-is

        Raises:
            InvalidParameterError if an orientation is requested for a
            metric that has no natural orientation
        """
        super().__init__(**kwargs)

        orient = params.enumeration(orient, {-1, +1, None})

        # a sign of +1 leaves _evaluate() results unchanged
        self._sign = +1
        if orient is not None:
            if not self.has_orientation:
                raise InvalidParameterError("oriented metric", self.orientation)
            # +1 when requested and natural orientation agree, -1 otherwise
            self._sign = orient * self.orientation

    @property
    def has_orientation(self):
        """Whether the metric has a preferred direction.

        An oriented metric is ordered, with either more negative or more
        positive values indicating improvement.

        Returns:
            True if the metric is oriented, False otherwise
        """
        return self.orientation != 0

    @property
    def orientation(self):
        """Direction of improvement for this metric, if any.

        Examples without orientation include signed residuals and composite
        metrics. The orientation of an object must never change over its
        lifetime.

        Returns:
            -1 for minimization, +1 for maximization, 0 if not applicable
        """
        # non-oriented by default; subclasses override to declare a direction
        return 0

    def evaluate(self, true, pred):
        """Compute the metric for given observations and predictions.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predictive property distributions (PredictiveDistribution)

        Returns:
            a scalar value, sign-adjusted to the orientation chosen at
            initialization

        Both arguments are distributions, including delta distributions
        (deterministic values) and normal distributions.
        """
        return self._sign * self._evaluate(true, pred)
# todo: introduce a 'summaryf' parameter to enable mean, min, max, ... of vector-valued evaluation metrics


class VectorEvaluationMetric(EvaluationMetric):
    """Base class for vector-valued EvaluationMetrics."""

    def evaluate(self, true, pred):
        """Evaluate metric for given observations and predictions.

        Both true and pred are distributions, including but not limited
        to the delta distribution (deterministic values) and the normal
        distribution.

        Parameters:
            true: observed property distributions (PredictiveDistribution)
            pred: predicted property distributions (PredictiveDistribution)

        Returns:
            a vector
        """

        # vector-valued metrics have no orientation, hence no sign flip
        return self._evaluate(true, pred)
######################
# Error statistics #
######################
class Residuals(VectorEvaluationMetric):
    r"""Signed errors (residuals).

    Prediction error residuals $f(x_i) - y_i$,
    where $x_i$ are inputs, $f$ is the learner and $y_i$ are observed values.
    """

    def _evaluate(self, true, pred):
        """Evaluate prediction error residuals.

        residuals = predicted - observed

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            residuals as NumPy array
        """

        observed = params.distribution(true).mean
        predicted = params.distribution(pred).mean
        return predicted - observed
class AbsoluteResiduals(Residuals):
    """Absolute value of residuals.

    Unsigned residuals. Absolute prediction error residuals $|f(x_i) - y_i|$,
    where $x_i$ are inputs, $f$ is the learner and $y_i$ are observed values.
    """

    def _evaluate(self, true, pred):
        """Evaluate unsigned prediction errors.

        unsigned residuals = | pred - observed |

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            unsigned residuals as NumPy array
        """

        signed = super()._evaluate(true, pred)
        return np.abs(signed)
class SquaredResiduals(Residuals):
    """Squared prediction errors.

    As Residuals, but squared.
    """

    def _evaluate(self, true, pred):
        """Evaluate squared prediction errors.

        squared residuals = ( pred - observed )^2

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            squared residuals as NumPy array
        """

        signed = super()._evaluate(true, pred)
        return np.square(signed)
class MeanAbsoluteError(ScalarEvaluationMetric):
    """Mean Absolute Error (MAE)."""

    @property
    def orientation(self):
        """Indicate minimization."""

        return -1

    def _evaluate(self, true, pred):
        r"""Evaluate Mean Absolute Error (MAE).

        \[ \text{MAE} = \frac{1}{n} \sum_{i=1}^n | f(x_i) - y_i | \]

        MAE = mean( | pred - observed | )

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            mean absolute error as floating point number
        """

        abs_errors = AbsoluteResiduals()._evaluate(true, pred)
        return float(np.mean(abs_errors))
class MeanSquaredError(ScalarEvaluationMetric):
    """Mean squared error (MSE)."""

    @property
    def orientation(self):
        """Indicate minimization."""

        return -1

    def _evaluate(self, true, pred):
        r"""Mean Squared Error (MSE).

        \[ \text{MSE} = \frac{1}{n} \sum_{i=1}^n ( f(x_i) - y_i )^2 \]

        MSE = mean( square( pred - observed ) )

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            mean squared error as a floating point number
        """

        sq_errors = SquaredResiduals()._evaluate(true, pred)
        return float(np.mean(sq_errors))
class RootMeanSquaredError(MeanSquaredError):
    """Root Mean Squared Error (RMSE)."""

    # orientation (minimization) is inherited from MeanSquaredError

    def _evaluate(self, true, pred):
        r"""Root Mean Squared Error (RMSE).

        \[ \text{RMSE} = \sqrt{ \frac{1}{n} \sum_{i=1}^n ( f(x_i) - y_i )^2 } \]

        RMSE = root( mean( square( pred - observed ) ) )

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            root mean squared error as a floating point number
        """

        mse = super()._evaluate(true, pred)
        return float(np.sqrt(mse))
class StandardizedRootMeanSquaredError(RootMeanSquaredError):
    r"""Standardized Root Mean Squared Error (stdRMSE).

    The standardized RMSE (stdRMSE), relative RMSE, or non-dimensional model
    error (NDME) is given by

    stdRMSE = RMSE / std. dev., where
    \[ \text{std. dev.} = \sqrt{ \frac{1}{n} \sum_{i=1}^n ( y_i - \bar{y} )^2 } \]
    and $\bar{y} = \frac{1}{n} \sum_{i=1}^n y_i$.

    The denominator can be interpreted as the RMSE of a model that predicts the
    mean of the validation set (!) labels. stdRMSE is a unit-less (non-dimensional)
    quantity, often between 0 (perfect model) and 1 (guess-the-mean performance).
    If the IID assumption is violated, that is, label distributions of
    training and validation set differ, stdRMSE can be arbitrarily high.

    The name "standardized RMSE" was chosen over "non-dimensional model error"
    because it is more specific (e.g., which "error"?) and more directly related
    to statistical terminology (e.g., "standard score").

    If the IID assumption holds, stdRMSE can be used to compare prediction errors
    across different datasets on the same scale (the datasets can still vary in
    how hard they are to learn).

    An advantage of stdRMSE over RMSE divided by label range is that stdRMSE is less
    statistically volatile (min and max are extremal statistics with high variance).

    For the estimator of the standard deviation, no bias correction is used by default
    (easing comparisons in many cases). See __init__ docstring for other options.
    """

    def __init__(self, bias_correction: float = 0, minimal_stddev: float = 1e-3, **kwargs):
        """Initialize metric.

        Parameters:
            bias_correction: no correction by default. if a positive value d is given,
                division is by n-d. Bessel's correction (d=1) is unbiased for variance
                estimators, but not for standard deviation estimators. While there is
                no value that works across all distributions, d=1.5 is a reasonably
                good correction.
            minimal_stddev: smallest label standard deviation accepted as non-zero;
                label sets whose standard deviation does not exceed this threshold
                raise an InvalidParameterError. The default of 1e-3 matches the
                previously hard-coded value.
        """

        self._bias_correction = params.real(bias_correction, from_=0)
        self._minimal_stddev = params.real(minimal_stddev, from_=0)
        super().__init__(**kwargs)

    # same orientation as RootMeanSquaredError

    def _evaluate(self, true, pred):
        """Root mean squared error divided by standard deviation of labels.

        stdRMSE = RMSE / std. dev.

        See class docstring for details.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; requires only means

        Returns:
            standardized root mean squared error as a floating point number

        Raises:
            InvalidParameterError for fewer than two samples or (near-)constant labels
        """

        true = params.distribution(true)

        # ensure sufficiently many samples
        n = len(true.mean)
        if n <= 1:
            raise InvalidParameterError(
                "enough samples to compute standard deviation", f"{n} samples"
            )

        # compute RMSE and standard deviation
        rmse = super()._evaluate(true, pred)
        stddev = np.std(true.mean, ddof=self._bias_correction)

        # ensure sufficient variance in samples
        if stddev <= self._minimal_stddev:
            raise InvalidParameterError(
                "sufficient label variance for non-zero standard deviation",
                f"standard deviation of {stddev}",
            )

        return float(rmse / stddev)
############################
# Uncertainty statistics #
############################
class LogPredictiveDensity(VectorEvaluationMetric):
    r"""Logarithmized Predictive Density (LPD)."""

    def _evaluate(self, true, pred):
        r"""Logarithmic Predictive Density (LPD).

        Assumes a normal predictive distribution.

        \[
        \log p (y_i = t_i | x_i)
        = - ( \log \sqrt{2\pi} + \log \sigma_i + \frac{1}{2} ( \frac{y_i - t_i}{\sigma_i} )^2 )
        \]

        See, for example,
        Joaquin Quinonero-Candela, Carl Edward Rasmussen, Fabian Sinz, Olivier Bousquet, and Bernhard Schölkopf.
        Evaluating predictive uncertainty challenge, p. 1-27, 2005. In Joaquin Quinonero-Candela, Ido Dagan,
        Bernardo Magnini, and Florence d'Alché Buc (editors), Proceedings of the First PASCAL Machine Learning
        Challenges Workshop (MLCW 2005), Southampton, United Kingdom, April 11–13, 2005.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; must be normal

        Returns:
            logarithmic predictive densities as a NumPy vector of floating point numbers
        """

        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        if np.any(pred.stddev == 0):
            # fixed: the two adjacent literals previously concatenated
            # without a separating space ("...__name__may return nan")
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__} may return nan.",
                RuntimeWarning,
            )

        lpd = -(
            np.log(np.sqrt(2 * np.pi))
            + np.log(pred.stddev)
            + 0.5 * np.square((true.mean - pred.mean) / pred.stddev)
        )
        return lpd
class MeanLogPredictiveDensity(ScalarEvaluationMetric):
    """Mean Logarithmized Predictive Density (MLPD)."""

    @property
    def orientation(self):
        """Indicate maximization."""

        return +1

    def _evaluate(self, true, pred):
        r"""Mean Logarithmic Predictive Density (MLPD).

        Mean of LogPredictiveDensity.
        Assumes a normal predictive distribution.

        \[
        1/n \sum_{i=1}^n \log p (y_i = t_i | x_i)
        = - ( \log \sqrt{2\pi} + \frac{1}{2n} \sum_{i=1}^n ( \log \sigma_i^2 + \frac{(y_i-t_i)^2}{\sigma_i^2} ) )
        \]

        See LogPredictiveDensity for details.

        Parameters:
            true: observed property distribution; requires only means
            pred: predictive property distribution; must be normal

        Returns:
            mean logarithmic predictive densities as a floating point number
        """

        # float() for consistency with the other scalar metrics
        # (MeanAbsoluteError etc.), which return built-in floats
        return float(np.mean(LogPredictiveDensity()._evaluate(true, pred)))
class ContinuousRankedProbabilityScore(VectorEvaluationMetric):
    r"""Continuous Ranked Probability Score (CRPS).

    The Continuous Ranked Probability Score (CRPS) [1] is the squared-difference integral
    between the predicted cumulative distribution function F and that of a delta function
    on the true value:

    \int\limits_{-\infty}^{\infty} \bigl( F(u) - F_y(u) \bigr)^2 w(u) \mathrm{d} u ,

    where $F_y(u) = 0$ for $u \leq y$ and 1 otherwise, and $w$ is a weighting function.
    For normal predictive distributions, an analytic expression exists: [2]

    \sigma \Bigl( y' \bigl( 2 \Phi(y') - 1 \bigr) + 2 \phi(y') - \frac{1}{\sqrt{\pi}} \Bigr)

    where $y' = \frac{y-\mu}{\sigma}$, and, $\Phi$ and $\phi$ are cumulative and probability
    density functions of the standard normal distribution.

    [1] James E. Matheson and Robert L. Winkler. Scoring rules for continuous
        probability distributions. Management Science 22(10):1087–1096, 1976.
    [2] Tilmann Gneiting, Adrian E. Raftery, Anton H. Westveld III, Tom Goldman. Calibrated
        probabilistic forecasting using ensemble model output statistics and minimum CRPS
        estimation. Monthly Weather Review, 133(5):1098–1118, 2005.
    """

    def _evaluate(self, true, pred):
        """Evaluate continuous ranked probability score (CRPS).

        CRPS depends on the mean of the observations and, in general, the full predictive distribution.
        Currently implemented only for normal predictive distributions, for which a closed-form expression exists.

        For arbitrary distributions (given as samples), an expression suitable for direct implementation is given by Equ. 3 in
        Eric P. Grimit, Tilmann Gneiting, Veronica J. Berrocal, Nicholas A. Johnson:
        The continuous ranked probability score for circular variables and its application to mesoscale forecast ensemble verification,
        Quarterly Journal of the Royal Meteorological Society 132(621C): 2925--2942, 2006. DOI 10.1256/qj.05.235

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions

        Returns:
            continuous ranked probability scores as a NumPy vector of floating point numbers
        """

        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        if np.any(pred.stddev == 0):
            # fixed: the two adjacent literals previously concatenated
            # without a separating space ("...__name__may return nan")
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__} may return nan.",
                RuntimeWarning,
            )

        strue = (true.mean - pred.mean) / pred.stddev  # re-used intermediate quantity
        crps = pred.stddev * (
            strue * (2 * sp.stats.norm.cdf(strue) - 1)
            + 2 * sp.stats.norm.pdf(strue)
            - 1 / np.sqrt(np.pi)
        )
        return crps
class MeanContinuousRankedProbabilityScore(ScalarEvaluationMetric):
    """Mean Continuous Ranked Probability Score (mCRPS)."""

    @property
    def orientation(self):
        """Indicate minimization."""

        return -1

    def _evaluate(self, true, pred):
        """Return arithmetic mean of CRPS.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions; must be normal

        Returns:
            mean CRPS as a floating point number
        """

        # float() for consistency with the other scalar metrics
        return float(np.mean(ContinuousRankedProbabilityScore()._evaluate(true, pred)))
class StandardConfidence(ScalarEvaluationMetric):
    """Fraction of the time that the magnitude of the residual is less than the predicted standard deviation.

    Standard confidence evaluates the quality of the predicted uncertainty estimates, both in terms of individual predictions and overall normalization.
    Does not depend on the predicted values, only the residuals.

    An alternative definition of standard confidence is as the fraction of observations for which the
    "normalized residual" -- residual divided by predicted uncertainty -- is less than one.
    In the ideal case the normalized residuals are normally distributed with std=1, and
    so in the ideal case the standard confidence will be 0.68. Thus there is no "orientation",
    and closer to 0.68 is better.

    The standard confidence is the observed coverage probability at the 68% confidence level.
    See e.g. https://www.stats.ox.ac.uk/pub/bdr/IAUL/Course1Notes5.pdf.
    """

    def _evaluate(self, true, pred):
        """Compute standard confidence.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions

        Returns:
            standard confidence as a floating point number
        """

        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        abs_residual = np.abs(true.mean - pred.mean)
        is_less = abs_residual < pred.stddev
        # float() for consistency with the other scalar metrics
        return float(np.mean(is_less))
class RootMeanSquareStandardizedResiduals(ScalarEvaluationMetric):
    """Root Mean Square of the Standardized Residuals (RMSSE).

    RMSSE evaluates the quality of the predicted uncertainty estimates, both in terms of individual predictions and overall normalization.
    Compared to standard confidence, RMSSE is more sensitive to outliers.
    Does not depend on the predicted values, only the residuals.

    No "orientation". Closer to 1 is better.
    """

    def _evaluate(self, true, pred):
        """Compute RMSSE.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions

        Returns:
            RMSSE as a floating point number; nan if any predicted uncertainty is zero
        """

        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        if np.any(pred.stddev == 0):
            # fixed: the two adjacent literals previously concatenated
            # without a separating space ("...__name__will be nan")
            warn(
                f"Some uncertainties are zero. Metric {self.__class__.__name__} will be nan.",
                RuntimeWarning,
            )
            return np.nan

        strue = (true.mean - pred.mean) / pred.stddev
        # float() for consistency with the other scalar metrics
        return float(np.sqrt(np.mean(np.power(strue, 2))))
class UncertaintyCorrelation(ScalarEvaluationMetric):
    """Correlation between uncertainty estimate and abs(residual).

    A positive value is desirable. A negative value indicates pathological behavior.
    Does not depend on the predicted values, only the residuals.
    """

    @property
    def orientation(self):
        """Indicate maximization."""

        return +1

    def _evaluate(self, true, pred):
        """Compute uncertainty correlation.

        Parameters:
            true: observed property distributions; requires only means
            pred: predictive property distributions

        Returns:
            uncertainty correlation as a floating point number
        """

        true = params.distribution(true)
        pred = params.normal_distribution(pred)

        abs_residual = np.abs(true.mean - pred.mean)
        # off-diagonal entry of the 2x2 correlation matrix;
        # float() for consistency with the other scalar metrics
        return float(np.corrcoef(abs_residual, pred.stddev)[0, 1])
# helper function
def two_sample_cumulative_distribution_function_statistic(
    sample_a, sample_b, f=lambda p, t: np.square(p - t), g=lambda s, w: np.sum(s * w)
):
    r"""Compute a statistic of the difference between two empirical cumulative distribution functions.

    Calculate statistics of the cumulative distribution functions (CDF) of two samples.
    Let $x_1,\ldots,x_d$ be the union of the two samples, $x_i < x_{i+1}$, and let
    $w_i = x_{i+1}-x_i$, $i = 1,\ldots,d-1$ be the differences between them.
    The calculated statistics have the form $g(s,w)$ where $s_i = f(F_a(x_i), F_b(x_i))$)
    and $F_a$, $F_b$ are the CDFs of the two samples.

    Here, the $x_i$ are the points where one or both of the CDFs changes, $f$ is a statistic
    that depends on the value of the two CDFs, and $g$ is an arbitrary function of $s$ and $w$.
    The default choice for $g$ is Riemann integration; as the CDFs are step functions, this is exact
    and leads to statistics of the form
    \[ \int_{-\infty}^{\infty} f(F_a(x),F_b(x)) dx . \]

    Parameters:
        sample_a: first sample; a sequence of real numbers
        sample_b: second sample; a sequence of real numbers;
            can be of different length than first sample
        f: function accepting two same-length real vectors, returning a real vector of same length.
            This function computes a value that depends only on the two CDFs, and is thus constant
            between change points. The default is the squared difference, f(a,b) = np.square(a-b).
            The convention here is to use the left endpoint of the "steps".
        g: function accepting two same-length real vectors, returning a real number.
            Computes the statistic based on values of f and step "widths".
            The default, g(s,w) = np.sum(g * w), performs Riemann integration.

    Returns:
        the statistic g(s, w)
    """

    sample_a = params.real_vector(sample_a)
    sample_b = params.real_vector(sample_b)

    allx = np.union1d(sample_a, sample_b)  # all x where F_a and F_b change
    xdif = np.ediff1d(allx)  # width of Riemann integration bars
    allx = allx.reshape((len(allx), 1))
    cdfa = np.count_nonzero(np.sort(sample_a) <= allx, axis=1) / len(sample_a)
    cdfb = np.count_nonzero(np.sort(sample_b) <= allx, axis=1) / len(sample_b)
    # np.asfarray was removed in NumPy 2.0; np.asarray(..., dtype=float) is the
    # documented equivalent and behaves identically on older NumPy as well
    stat = np.asarray(f(cdfa, cdfb), dtype=float)
    return g(stat[:-1], xdif)
| 0 | 0 | 0 |
0e281d56c5f9905e8839b0896a7170ae4e709fb0 | 41,296 | py | Python | sepnet/model.py | Andrewzh112/SepNet | 58618efd82a72b156ccbb45005d18d347982865f | [
"MIT"
] | null | null | null | sepnet/model.py | Andrewzh112/SepNet | 58618efd82a72b156ccbb45005d18d347982865f | [
"MIT"
] | null | null | null | sepnet/model.py | Andrewzh112/SepNet | 58618efd82a72b156ccbb45005d18d347982865f | [
"MIT"
] | null | null | null | import torch
from torch import nn
class DistractionConv(nn.Module):
"""Inspired by SKNet"""
| 34.128926 | 79 | 0.499177 | import torch
from torch import nn
def convs(in_channels, out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
padding=1),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(inplace=True),
nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=1),
nn.LeakyReLU(inplace=True)
)
def down_irblock(in_channels, out_channels):
    """Inverted residual block followed by a 1x1 channel-changing conv.

    Spatial resolution is preserved; only the channel count changes.
    """
    layers = [
        InvertedResidualBlock(in_channels, in_channels),
        nn.LeakyReLU(inplace=True),
        nn.Conv2d(in_channels, out_channels, kernel_size=1),
        nn.LeakyReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class ConvNormReLU(nn.Module):
    """Conv2d -> normalization -> activation with 'same' padding.

    Defaults to BatchNorm2d and LeakyReLU when norm_layer / activation are
    not supplied. The convolution carries no bias (the norm provides one).
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            groups=1,
            activation=None,
            norm_layer=None):
        super().__init__()
        norm_layer = nn.BatchNorm2d if norm_layer is None else norm_layer
        activation = nn.LeakyReLU() if activation is None else activation
        # 'same' padding for odd kernel sizes
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            (kernel_size - 1) // 2,
            groups=groups,
            bias=False)
        self.norm = norm_layer(out_channels)
        self.activate = activation

    def forward(self, x):
        return self.activate(self.norm(self.conv(x)))
class InvertedResidualBlock(nn.Module):
    """MobileNetV2-style inverted residual block.

    Expands channels by factor ``t`` with a 1x1 conv, applies a depthwise
    conv (which carries the stride, only when t > 1), then projects back
    with a linear 1x1 conv + norm. The skip connection is used only when
    it is shape-safe: stride == 1 and in_channels == out_channels.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            t=6,
            stride=1,
            norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(in_channels * t)
        self.down_sample = stride == 2  # kept for backward compatibility
        # fix: previously the residual was added whenever stride != 2, which
        # crashed for stride-1 blocks with in_channels != out_channels; the
        # skip is only valid when input and output shapes are identical
        self.use_residual = stride == 1 and in_channels == out_channels
        modules = [ConvNormReLU(in_channels, hidden_dim,
                                kernel_size=1, norm_layer=norm_layer)]
        if t > 1:
            # depthwise conv; carries the spatial stride
            modules.append(ConvNormReLU(hidden_dim,
                                        hidden_dim,
                                        stride=stride,
                                        groups=hidden_dim,
                                        norm_layer=norm_layer))
        # linear bottleneck: 1x1 projection without activation
        modules.append(
            nn.Conv2d(hidden_dim,
                      out_channels,
                      kernel_size=1,
                      bias=False))
        modules.append(norm_layer(out_channels))
        self.irblock = nn.Sequential(*modules)

    def forward(self, x):
        if self.use_residual:
            return self.irblock(x) + x
        return self.irblock(x)
class UNet_Based(nn.Module):
    """U-Net style encoder/decoder that separates a blended input image.

    The encoder downsamples 4 times with strided 3x3 convolutions
    (channels: based_dim -> based_dim*16); the decoder upsamples with
    2x2 transposed convolutions and concatenates the matching encoder
    feature map at each stage (skip connections). A final 1x1 conv emits
    ``in_channels * 2`` channels, i.e. the two separated images stacked
    along the channel axis.

    Input height and width must be divisible by 16.
    """

    def __init__(self, in_channels=3, based_dim=64):
        super().__init__()
        # NOTE(review): removed the previously unused self.maxpool and the
        # commented-out maxpool forward path; the strided convolutions below
        # perform all downsampling. MaxPool2d has no parameters, so removing
        # it does not affect saved checkpoints.

        # encoder: conv block, then strided conv halving the resolution
        self.conv1 = convs(in_channels, based_dim)
        self.sconv1 = nn.Conv2d(in_channels=based_dim,
                                out_channels=based_dim,
                                kernel_size=3,
                                stride=2,
                                padding=1)
        self.conv2 = convs(based_dim, based_dim*2)
        self.sconv2 = nn.Conv2d(in_channels=based_dim*2,
                                out_channels=based_dim*2,
                                kernel_size=3,
                                stride=2,
                                padding=1)
        self.conv3 = convs(based_dim*2, based_dim*4)
        self.sconv3 = nn.Conv2d(in_channels=based_dim*4,
                                out_channels=based_dim*4,
                                kernel_size=3,
                                stride=2,
                                padding=1)
        self.conv4 = convs(based_dim*4, based_dim*8)
        self.sconv4 = nn.Conv2d(in_channels=based_dim*8,
                                out_channels=based_dim*8,
                                kernel_size=3,
                                stride=2,
                                padding=1)
        self.conv5 = convs(based_dim*8, based_dim*16)

        # decoder: transposed conv doubles the resolution, conv block fuses
        # the upsampled features with the encoder skip connection
        self.tconv1 = nn.ConvTranspose2d(
            in_channels=based_dim*16,
            out_channels=based_dim*8,
            kernel_size=2,
            stride=2
        )
        self.upconv1 = convs(based_dim*16, based_dim*8)
        self.tconv2 = nn.ConvTranspose2d(
            in_channels=based_dim*8,
            out_channels=based_dim*4,
            kernel_size=2,
            stride=2
        )
        self.upconv2 = convs(based_dim*8, based_dim*4)
        self.tconv3 = nn.ConvTranspose2d(
            in_channels=based_dim*4,
            out_channels=based_dim*2,
            kernel_size=2,
            stride=2
        )
        self.upconv3 = convs(based_dim*4, based_dim*2)
        self.tconv4 = nn.ConvTranspose2d(
            in_channels=based_dim*2,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        self.upconv4 = convs(based_dim*2, based_dim)

        # 1x1 conv producing the two separated images (in_channels each)
        self.sep = nn.Conv2d(
            in_channels=based_dim,
            out_channels=in_channels*2,
            kernel_size=1
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for conv layers; unit weight / zero bias for norms."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, combined_image):
        """Separate ``combined_image`` (bs, c, h, w) into two stacked images.

        Returns a tensor of shape (bs, c*2, h, w).
        """
        # encoder; x1..x4 are kept for the skip connections
        x1 = self.conv1(combined_image)
        x = self.sconv1(x1)
        x2 = self.conv2(x)
        x = self.sconv2(x2)
        x3 = self.conv3(x)
        x = self.sconv3(x3)
        x4 = self.conv4(x)
        x = self.sconv4(x4)
        x5 = self.conv5(x)
        # decoder with skip connections
        x = self.tconv1(x5)
        x = self.upconv1(torch.cat([x, x4], dim=1))
        x = self.tconv2(x)
        x = self.upconv2(torch.cat([x, x3], dim=1))
        x = self.tconv3(x)
        x = self.upconv3(torch.cat([x, x2], dim=1))
        x = self.tconv4(x)
        x = self.upconv4(torch.cat([x, x1], dim=1))
        x = self.sep(x)
        return x
class MobileUnet(nn.Module):
    """U-Net with MobileNetV2-style inverted residual blocks.

    The encoder downsamples 5 times via stride-2 InvertedResidualBlocks
    (channels: based_dim -> based_dim*16). The decoder upsamples with 2x2
    transposed convolutions and concatenates encoder skip features, then
    reduces channels with down_irblock. The final 1x1 conv emits
    ``in_channels * 2`` channels (two separated images stacked).

    Input height and width must be divisible by 32 (5 halvings).
    """

    def __init__(self, in_channels, based_dim=32):
        super().__init__()
        # encoder: convN keeps resolution, sconvN halves it (stride 2)
        self.conv1 = convs(in_channels, based_dim)
        self.sconv1 = InvertedResidualBlock(based_dim,
                                            based_dim,
                                            stride=2)
        self.conv2 = InvertedResidualBlock(based_dim, based_dim)
        self.sconv2 = InvertedResidualBlock(based_dim,
                                            based_dim*2,
                                            stride=2)
        self.conv3 = InvertedResidualBlock(based_dim*2, based_dim*2)
        self.sconv3 = InvertedResidualBlock(based_dim*2,
                                            based_dim*4,
                                            stride=2)
        self.conv4 = InvertedResidualBlock(based_dim*4, based_dim*4)
        self.sconv4 = InvertedResidualBlock(based_dim*4,
                                            based_dim*8,
                                            stride=2)
        self.conv5 = InvertedResidualBlock(based_dim*8, based_dim*8)
        self.sconv5 = InvertedResidualBlock(based_dim*8,
                                            based_dim*16,
                                            stride=2)
        # decoder: tconvN doubles resolution and halves channels,
        # upconvN fuses the concatenated skip connection
        self.tconv1 = nn.ConvTranspose2d(
            in_channels=based_dim*16,
            out_channels=based_dim*8,
            kernel_size=2,
            stride=2
        )
        self.upconv1 = down_irblock(based_dim*16, based_dim*8)
        self.tconv2 = nn.ConvTranspose2d(
            in_channels=based_dim*8,
            out_channels=based_dim*4,
            kernel_size=2,
            stride=2
        )
        self.upconv2 = down_irblock(based_dim*8, based_dim*4)
        self.tconv3 = nn.ConvTranspose2d(
            in_channels=based_dim*4,
            out_channels=based_dim*2,
            kernel_size=2,
            stride=2
        )
        self.upconv3 = down_irblock(based_dim*4, based_dim*2)
        self.tconv4 = nn.ConvTranspose2d(
            in_channels=based_dim*2,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        self.upconv4 = down_irblock(based_dim*2, based_dim)
        # final upsample back to the input resolution
        self.tconv5 = nn.ConvTranspose2d(
            in_channels=based_dim,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        # 1x1 conv producing the two separated images
        self.sep = nn.Conv2d(
            in_channels=based_dim,
            out_channels=in_channels*2,
            kernel_size=1
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for conv layers; unit weight / zero bias for norms."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, combined_image):
        """Separate ``combined_image`` (bs, c, h, w) into (bs, c*2, h, w).

        Note: the skip tensors x1..x4 are the *downsampled* sconvN outputs,
        so each decoder stage concatenates features of matching resolution.
        """
        # encoder
        # bs, c, h, w
        x = self.conv1(combined_image)
        x1 = self.sconv1(x)
        x = self.conv2(x1)
        x2 = self.sconv2(x)
        x = self.conv3(x2)
        x3 = self.sconv3(x)
        x = self.conv4(x3)
        x4 = self.sconv4(x)
        x = self.conv5(x4)
        x = self.sconv5(x)
        # decoder
        x = self.tconv1(x)
        x = self.upconv1(torch.cat([x, x4], dim=1))
        x = self.tconv2(x)
        x = self.upconv2(torch.cat([x, x3], dim=1))
        x = self.tconv3(x)
        x = self.upconv3(torch.cat([x, x2], dim=1))
        x = self.tconv4(x)
        x = self.upconv4(torch.cat([x, x1], dim=1))
        x = self.tconv5(x)
        x = self.sep(x)
        return x
class MobileUnet1DUp(nn.Module):
    """MobileUnet variant with heavier decoder up-blocks.

    Same encoder as MobileUnet. Each decoder stage is a Sequential of
    (transposed conv keeping channels, LeakyReLU, down_irblock halving
    channels); the final separation layer is a down_irblock rather than
    a plain 1x1 conv. Output has ``in_channels * 2`` channels.
    """

    def __init__(self, in_channels, based_dim=32):
        super().__init__()
        # encoder: convN keeps resolution, sconvN halves it (stride 2)
        self.conv1 = convs(in_channels, based_dim)
        self.sconv1 = InvertedResidualBlock(based_dim,
                                            based_dim,
                                            stride=2)
        self.conv2 = InvertedResidualBlock(based_dim, based_dim)
        self.sconv2 = InvertedResidualBlock(based_dim,
                                            based_dim*2,
                                            stride=2,)
        self.conv3 = InvertedResidualBlock(based_dim*2, based_dim*2)
        self.sconv3 = InvertedResidualBlock(based_dim*2,
                                            based_dim*4,
                                            stride=2)
        self.conv4 = InvertedResidualBlock(based_dim*4, based_dim*4)
        self.sconv4 = InvertedResidualBlock(based_dim*4,
                                            based_dim*8,
                                            stride=2)
        self.conv5 = InvertedResidualBlock(based_dim*8, based_dim*8)
        self.sconv5 = InvertedResidualBlock(based_dim*8,
                                            based_dim*16,
                                            stride=2)
        # decoder stage: upsample 2x (channels unchanged), activate,
        # then reduce channels by half via down_irblock
        self.tconv1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*16,
                out_channels=based_dim*16,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*16,
                out_channels=based_dim*8
            )
        )
        self.upconv1 = down_irblock(based_dim*16, based_dim*8)
        self.tconv2 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*8,
                out_channels=based_dim*8,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*8,
                out_channels=based_dim*4
            )
        )
        self.upconv2 = down_irblock(based_dim*8, based_dim*4)
        self.tconv3 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*4,
                out_channels=based_dim*4,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*4,
                out_channels=based_dim*2
            )
        )
        self.upconv3 = down_irblock(based_dim*4, based_dim*2)
        self.tconv4 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*2,
                out_channels=based_dim*2,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*2,
                out_channels=based_dim
            )
        )
        self.upconv4 = down_irblock(based_dim*2, based_dim)
        # final upsample back to the input resolution
        self.tconv5 = nn.ConvTranspose2d(
            in_channels=based_dim,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        # separation head producing the two stacked images
        self.sep = down_irblock(
            in_channels=based_dim,
            out_channels=in_channels*2
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for conv layers; unit weight / zero bias for norms."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, combined_image):
        """Separate ``combined_image`` (bs, c, h, w) into (bs, c*2, h, w).

        Skip tensors x1..x4 are the downsampled sconvN outputs, matching
        the resolution of the corresponding decoder stage.
        """
        # encoder
        # bs, c, h, w
        x = self.conv1(combined_image)
        x1 = self.sconv1(x)
        x = self.conv2(x1)
        x2 = self.sconv2(x)
        x = self.conv3(x2)
        x3 = self.sconv3(x)
        x = self.conv4(x3)
        x4 = self.sconv4(x)
        x = self.conv5(x4)
        x = self.sconv5(x)
        # decoder
        x = self.tconv1(x)
        x = self.upconv1(torch.cat([x, x4], dim=1))
        x = self.tconv2(x)
        x = self.upconv2(torch.cat([x, x3], dim=1))
        x = self.tconv3(x)
        x = self.upconv3(torch.cat([x, x2], dim=1))
        x = self.tconv4(x)
        x = self.upconv4(torch.cat([x, x1], dim=1))
        x = self.tconv5(x)
        x = self.sep(x)
        return x
class MobileUnet2Heads(nn.Module):
    """MobileUnet variant with a shared encoder and two decoder heads.

    The encoder is identical to MobileUnet1DUp. The decoder is duplicated
    per head (suffixes ``h1`` / ``h2``); both heads consume the same
    encoder skip tensors. Each head outputs ``in_channels`` channels via
    its own separation layer; the two outputs are concatenated along the
    channel axis, so the overall output has ``in_channels * 2`` channels.
    """

    def __init__(self, in_channels, based_dim=32):
        super().__init__()
        # shared encoder: convN keeps resolution, sconvN halves it
        self.conv1 = convs(in_channels, based_dim)
        self.sconv1 = InvertedResidualBlock(based_dim,
                                            based_dim,
                                            stride=2)
        self.conv2 = InvertedResidualBlock(based_dim, based_dim)
        self.sconv2 = InvertedResidualBlock(based_dim,
                                            based_dim*2,
                                            stride=2,)
        self.conv3 = InvertedResidualBlock(based_dim*2, based_dim*2)
        self.sconv3 = InvertedResidualBlock(based_dim*2,
                                            based_dim*4,
                                            stride=2)
        self.conv4 = InvertedResidualBlock(based_dim*4, based_dim*4)
        self.sconv4 = InvertedResidualBlock(based_dim*4,
                                            based_dim*8,
                                            stride=2)
        self.conv5 = InvertedResidualBlock(based_dim*8, based_dim*8)
        self.sconv5 = InvertedResidualBlock(based_dim*8,
                                            based_dim*16,
                                            stride=2)
        # decoder stage 1, head 1: upsample 2x, activate, halve channels
        self.tconv1h1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*16,
                out_channels=based_dim*16,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*16,
                out_channels=based_dim*8
            )
        )
        # decoder stage 1, head 2 (same structure, separate weights)
        self.tconv1h2 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*16,
                out_channels=based_dim*16,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*16,
                out_channels=based_dim*8
            )
        )
        self.upconv1h1 = down_irblock(based_dim*16, based_dim*8)
        self.tconv2h1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*8,
                out_channels=based_dim*8,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*8,
                out_channels=based_dim*4
            )
        )
        self.upconv1h2 = down_irblock(based_dim*16, based_dim*8)
        self.tconv2h2 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*8,
                out_channels=based_dim*8,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*8,
                out_channels=based_dim*4
            )
        )
        self.upconv2h1 = down_irblock(based_dim*8, based_dim*4)
        self.tconv3h1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*4,
                out_channels=based_dim*4,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*4,
                out_channels=based_dim*2
            )
        )
        self.upconv2h2 = down_irblock(based_dim*8, based_dim*4)
        self.tconv3h2 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*4,
                out_channels=based_dim*4,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*4,
                out_channels=based_dim*2
            )
        )
        self.upconv3h1 = down_irblock(based_dim*4, based_dim*2)
        self.tconv4h1 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*2,
                out_channels=based_dim*2,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*2,
                out_channels=based_dim
            )
        )
        self.upconv3h2 = down_irblock(based_dim*4, based_dim*2)
        self.tconv4h2 = nn.Sequential(
            nn.ConvTranspose2d(
                in_channels=based_dim*2,
                out_channels=based_dim*2,
                kernel_size=2,
                stride=2
            ),
            nn.LeakyReLU(),
            down_irblock(
                in_channels=based_dim*2,
                out_channels=based_dim
            )
        )
        self.upconv4h1 = down_irblock(based_dim*2, based_dim)
        # final per-head upsample back to the input resolution
        self.tconv5h1 = nn.ConvTranspose2d(
            in_channels=based_dim,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        self.upconv4h2 = down_irblock(based_dim*2, based_dim)
        self.tconv5h2 = nn.ConvTranspose2d(
            in_channels=based_dim,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        # per-head separation layers: each head emits in_channels channels
        self.seph1 = down_irblock(
            in_channels=based_dim,
            out_channels=in_channels
        )
        self.seph2 = down_irblock(
            in_channels=based_dim,
            out_channels=in_channels
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for conv layers; unit weight / zero bias for norms."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, combined_image):
        """Separate ``combined_image`` (bs, c, h, w) into (bs, c*2, h, w).

        The shared encoder skips x1..x4 are concatenated into BOTH decoder
        heads at the matching resolutions; the two head outputs are stacked
        along the channel axis.
        """
        # encoder
        # bs, c, h, w
        x = self.conv1(combined_image)
        x1 = self.sconv1(x)
        x = self.conv2(x1)
        x2 = self.sconv2(x)
        x = self.conv3(x2)
        x3 = self.sconv3(x)
        x = self.conv4(x3)
        x4 = self.sconv4(x)
        x = self.conv5(x4)
        x = self.sconv5(x)
        # decoder (two heads in lockstep)
        xh1, xh2 = self.tconv1h1(x), self.tconv1h2(x)
        xh1, xh2 = (self.upconv1h1(torch.cat([xh1, x4], dim=1)),
                    self.upconv1h2(torch.cat([xh2, x4], dim=1)))
        xh1, xh2 = self.tconv2h1(xh1), self.tconv2h2(xh2)
        xh1, xh2 = (self.upconv2h1(torch.cat([xh1, x3], dim=1)),
                    self.upconv2h2(torch.cat([xh2, x3], dim=1)))
        xh1, xh2 = self.tconv3h1(xh1), self.tconv3h2(xh2)
        xh1, xh2 = (self.upconv3h1(torch.cat([xh1, x2], dim=1)),
                    self.upconv3h2(torch.cat([xh2, x2], dim=1)))
        xh1, xh2 = self.tconv4h1(xh1), self.tconv4h2(xh2)
        xh1, xh2 = (self.upconv4h1(torch.cat([xh1, x1], dim=1)),
                    self.upconv4h2(torch.cat([xh2, x1], dim=1)))
        xh1, xh2 = self.tconv5h1(xh1), self.tconv5h2(xh2)
        xh1, xh2 = self.seph1(xh1), self.seph2(xh2)
        x = torch.cat([xh1, xh2], dim=1)
        return x
class MobileUnet2HeadsNoPrev(MobileUnet2Heads):
    """Two-head MobileUnet variant that ignores encoder skip connections.

    Instead of fusing encoder features, each decoder stage cross-fuses the
    two heads with each other (concatenating in both orders), so no
    intermediate encoder activations need to be retained.
    """

    def __init__(self, in_channels, based_dim=32):
        # Identical layer layout to the parent; only ``forward`` differs.
        super().__init__(in_channels, based_dim)

    def forward(self, combined_image):
        """Encode the (bs, c, h, w) batch and decode via cross-fused heads."""
        # Encoder: plain feed-forward pass, no skip maps kept.
        feat = combined_image
        for conv, strided in (
                (self.conv1, self.sconv1),
                (self.conv2, self.sconv2),
                (self.conv3, self.sconv3),
                (self.conv4, self.sconv4),
                (self.conv5, self.sconv5)):
            feat = strided(conv(feat))
        # Decoder: at every level the heads exchange information by
        # concatenating each other's features before channel reduction.
        h1, h2 = self.tconv1h1(feat), self.tconv1h2(feat)
        stages = (
            (self.upconv1h1, self.upconv1h2, self.tconv2h1, self.tconv2h2),
            (self.upconv2h1, self.upconv2h2, self.tconv3h1, self.tconv3h2),
            (self.upconv3h1, self.upconv3h2, self.tconv4h1, self.tconv4h2),
            (self.upconv4h1, self.upconv4h2, self.tconv5h1, self.tconv5h2),
        )
        for fuse1, fuse2, up1, up2 in stages:
            fused1 = fuse1(torch.cat([h1, h2], dim=1))
            fused2 = fuse2(torch.cat([h2, h1], dim=1))
            h1, h2 = up1(fused1), up2(fused2)
        h1, h2 = self.seph1(h1), self.seph2(h2)
        return torch.cat([h1, h2], dim=1)
class MobileUnetNoCat(nn.Module):
    """MobileNet-style U-net without skip connections.

    The encoder halves the spatial resolution five times while widening the
    feature maps; the decoder upsamples back with transposed convolutions.
    The single output head emits ``in_channels * 2`` channels.
    """

    def __init__(self, in_channels, based_dim=32):
        super().__init__()

        def up_stage(cin, cout):
            # One decoder step: 2x upsampling followed by channel reduction.
            return nn.Sequential(
                nn.ConvTranspose2d(
                    in_channels=cin,
                    out_channels=cin,
                    kernel_size=2,
                    stride=2
                ),
                nn.LeakyReLU(),
                down_irblock(in_channels=cin, out_channels=cout)
            )

        # Encoder: alternating same-resolution and stride-2 inverted
        # residual blocks, doubling the width at every downsampling.
        self.conv1 = convs(in_channels, based_dim)
        self.sconv1 = InvertedResidualBlock(based_dim, based_dim, stride=2)
        self.conv2 = InvertedResidualBlock(based_dim, based_dim)
        self.sconv2 = InvertedResidualBlock(based_dim, based_dim * 2, stride=2)
        self.conv3 = InvertedResidualBlock(based_dim * 2, based_dim * 2)
        self.sconv3 = InvertedResidualBlock(based_dim * 2, based_dim * 4, stride=2)
        self.conv4 = InvertedResidualBlock(based_dim * 4, based_dim * 4)
        self.sconv4 = InvertedResidualBlock(based_dim * 4, based_dim * 8, stride=2)
        self.conv5 = InvertedResidualBlock(based_dim * 8, based_dim * 8)
        self.sconv5 = InvertedResidualBlock(based_dim * 8, based_dim * 16, stride=2)
        self.encoder = nn.Sequential(
            self.conv1, self.sconv1,
            self.conv2, self.sconv2,
            self.conv3, self.sconv3,
            self.conv4, self.sconv4,
            self.conv5, self.sconv5
        )

        # Decoder: five 2x upsampling stages back to the input resolution.
        self.tconv1 = up_stage(based_dim * 16, based_dim * 8)
        self.tconv2 = up_stage(based_dim * 8, based_dim * 4)
        self.tconv3 = up_stage(based_dim * 4, based_dim * 2)
        self.tconv4 = up_stage(based_dim * 2, based_dim)
        self.tconv5 = nn.ConvTranspose2d(
            in_channels=based_dim,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        self.sep = down_irblock(
            in_channels=based_dim,
            out_channels=in_channels * 2
        )
        self.decoder = nn.Sequential(
            self.tconv1,
            self.tconv2,
            self.tconv3,
            self.tconv4,
            self.tconv5,
            nn.LeakyReLU(),
            self.sep
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for conv weights, unit affine for norm layers."""
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(
                    module.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, combined_image):
        """Run the full encoder/decoder pass on a (bs, c, h, w) batch."""
        return self.decoder(self.encoder(combined_image))
class MobileUnetNoCat2Heads(nn.Module):
    """MobileNet-style U-net with a shared encoder and two independent
    decoder heads (no skip connections).

    The two head outputs are concatenated along the channel axis, giving
    ``in_channels * 2`` output channels in total.
    """

    def __init__(self, in_channels, based_dim=32):
        super().__init__()

        def up_stage(cin, cout):
            # One decoder step: 2x upsampling followed by channel reduction.
            return nn.Sequential(
                nn.ConvTranspose2d(
                    in_channels=cin,
                    out_channels=cin,
                    kernel_size=2,
                    stride=2
                ),
                nn.LeakyReLU(),
                down_irblock(in_channels=cin, out_channels=cout)
            )

        # Shared encoder (identical layout to MobileUnetNoCat).
        self.conv1 = convs(in_channels, based_dim)
        self.sconv1 = InvertedResidualBlock(based_dim, based_dim, stride=2)
        self.conv2 = InvertedResidualBlock(based_dim, based_dim)
        self.sconv2 = InvertedResidualBlock(based_dim, based_dim * 2, stride=2)
        self.conv3 = InvertedResidualBlock(based_dim * 2, based_dim * 2)
        self.sconv3 = InvertedResidualBlock(based_dim * 2, based_dim * 4, stride=2)
        self.conv4 = InvertedResidualBlock(based_dim * 4, based_dim * 4)
        self.sconv4 = InvertedResidualBlock(based_dim * 4, based_dim * 8, stride=2)
        self.conv5 = InvertedResidualBlock(based_dim * 8, based_dim * 8)
        self.sconv5 = InvertedResidualBlock(based_dim * 8, based_dim * 16, stride=2)
        self.encoder = nn.Sequential(
            self.conv1, self.sconv1,
            self.conv2, self.sconv2,
            self.conv3, self.sconv3,
            self.conv4, self.sconv4,
            self.conv5, self.sconv5
        )

        # Decoder stages, created pairwise (head 1 / head 2) per level.
        self.tconv1h1 = up_stage(based_dim * 16, based_dim * 8)
        self.tconv1h2 = up_stage(based_dim * 16, based_dim * 8)
        self.tconv2h1 = up_stage(based_dim * 8, based_dim * 4)
        self.tconv2h2 = up_stage(based_dim * 8, based_dim * 4)
        self.tconv3h1 = up_stage(based_dim * 4, based_dim * 2)
        self.tconv3h2 = up_stage(based_dim * 4, based_dim * 2)
        self.tconv4h1 = up_stage(based_dim * 2, based_dim)
        self.tconv4h2 = up_stage(based_dim * 2, based_dim)
        self.tconv5h1 = nn.ConvTranspose2d(
            in_channels=based_dim,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        self.tconv5h2 = nn.ConvTranspose2d(
            in_channels=based_dim,
            out_channels=based_dim,
            kernel_size=2,
            stride=2
        )
        self.seph1 = down_irblock(
            in_channels=based_dim,
            out_channels=in_channels
        )
        self.seph2 = down_irblock(
            in_channels=based_dim,
            out_channels=in_channels
        )
        self.decoderh1 = nn.Sequential(
            self.tconv1h1,
            self.tconv2h1,
            self.tconv3h1,
            self.tconv4h1,
            self.tconv5h1,
            nn.LeakyReLU(),
            self.seph1
        )
        self.decoderh2 = nn.Sequential(
            self.tconv1h2,
            self.tconv2h2,
            self.tconv3h2,
            self.tconv4h2,
            self.tconv5h2,
            nn.LeakyReLU(),
            self.seph2
        )
        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for conv weights, unit affine for norm layers."""
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.kaiming_normal_(
                    module.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, combined_image):
        """Encode the (bs, c, h, w) batch once, decode through both heads,
        and concatenate the head outputs on the channel axis.
        """
        feat = self.encoder(combined_image)
        return torch.cat([self.decoderh1(feat), self.decoderh2(feat)], dim=1)
class DeConvNormReLU(nn.Module):
    """Transposed convolution followed by normalization and activation.

    Defaults to a 2x upsampling deconvolution, ``nn.BatchNorm2d`` and
    ``nn.LeakyReLU`` when no explicit normalization layer / activation
    module is supplied.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size=2,
            stride=2,
            groups=1,
            activation=None,
            norm_layer=None):
        super().__init__()
        norm_cls = nn.BatchNorm2d if norm_layer is None else norm_layer
        self.conv = nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            groups=groups,
            bias=False)  # bias is redundant before the affine norm layer
        self.norm = norm_cls(out_channels)
        self.activate = nn.LeakyReLU() if activation is None else activation

    def forward(self, x):
        """Apply deconv -> norm -> activation to ``x``."""
        return self.activate(self.norm(self.conv(x)))
class InvertedResidualTransBlock(nn.Module):
    """Inverted-residual block whose middle step is a grouped transposed
    convolution, i.e. an upsampling MobileNetV2-style block:
    1x1 expansion -> depthwise 2x deconv -> linear 1x1 projection.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            stride=2,
            t=6,
            norm_layer=None):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        expanded = int(in_channels * t)  # expansion factor ``t``
        self.irtblock = nn.Sequential(
            # pointwise expansion
            ConvNormReLU(in_channels, expanded,
                         kernel_size=1, norm_layer=norm_layer),
            # grouped (depthwise) transposed conv performs the upsampling
            DeConvNormReLU(expanded,
                           expanded,
                           stride=stride,
                           groups=expanded,
                           norm_layer=norm_layer),
            # linear projection: 1x1 conv + norm, no activation afterwards
            nn.Conv2d(expanded,
                      out_channels,
                      kernel_size=1,
                      bias=False),
            norm_layer(out_channels),
        )

    def forward(self, x):
        """Run the expand/deconv/project pipeline on ``x``."""
        return self.irtblock(x)
class DistractionConv(nn.Module):
    """Inspired by SKNet: selective-kernel style attention applied to two
    parallel streams A and B.

    Each stream runs ``M`` parallel inverted-residual branches; a
    squeeze-and-excitation style gate (global pooling -> bottleneck of
    width ``max(in_channels // r, L)`` -> per-branch 1x1 convs -> softmax
    over branches) weights the branches, and the weighted branch features
    are summed per stream.
    """

    def __init__(self, in_channels, M=3, r=16, stride=1, L=32, norm=None):
        super().__init__()
        bottleneck = max(in_channels // r, L)  # squeeze width, floored at L
        if norm is None:
            norm = nn.BatchNorm2d
        self.in_channels, self.M = in_channels, M
        # M parallel branches per stream (created alternately A/B to keep
        # module-registration order stable).
        self.covsA, self.covsB = nn.ModuleList([]), nn.ModuleList([])
        for _ in range(M):
            self.covsA.append(InvertedResidualBlock(
                in_channels,
                in_channels,
                stride=stride,
            ))
            self.covsB.append(InvertedResidualBlock(
                in_channels,
                in_channels,
                stride=stride,
            ))
        # Global pooling + squeeze bottleneck, one per stream.
        self.gapA = nn.AdaptiveAvgPool2d((1, 1))
        self.gapB = nn.AdaptiveAvgPool2d((1, 1))
        self.fcA = nn.Sequential(
            nn.Conv2d(in_channels, bottleneck,
                      kernel_size=1, stride=1, bias=False),
            norm(bottleneck),
            nn.LeakyReLU(inplace=True)
        )
        self.fcB = nn.Sequential(
            nn.Conv2d(in_channels, bottleneck,
                      kernel_size=1, stride=1, bias=False),
            norm(bottleneck),
            nn.LeakyReLU(inplace=True)
        )
        # Per-branch excitation heads projecting back to in_channels.
        self.fcsA = nn.ModuleList(
            [nn.Conv2d(
                bottleneck, in_channels, kernel_size=1, stride=1
            ) for _ in range(M)])
        self.fcsB = nn.ModuleList(
            [nn.Conv2d(
                bottleneck, in_channels, kernel_size=1, stride=1
            ) for _ in range(M)])
        # self.softmin = nn.Softmin(dim=1)
        self.softmax = nn.Softmax(dim=1)

    def _branch(self, x, branches, gap, squeeze, excites):
        """Apply the M-branch selective attention to one stream."""
        batch = x.size(0)
        feats = torch.cat([branch(x) for branch in branches], dim=1)
        # (batch, M*C, H, W) -> (batch, M, C, H, W)
        feats = feats.view(batch, self.M, self.in_channels,
                           feats.shape[2], feats.shape[3])
        pooled = gap(feats.sum(dim=1))           # fuse branches, then pool
        compact = squeeze(pooled)                # bottleneck descriptor
        logits = torch.cat([excite(compact) for excite in excites], dim=1)
        weights = self.softmax(
            logits.view(batch, self.M, self.in_channels, 1, 1))
        return (feats * weights).sum(dim=1)

    def forward(self, A, B):
        """Return the attention-fused features for streams A and B."""
        return (self._branch(A, self.covsA, self.gapA, self.fcA, self.fcsA),
                self._branch(B, self.covsB, self.gapB, self.fcB, self.fcsB))
class MobileUnet2HeadsDistraction(MobileUnetNoCat2Heads):
    """Two-head MobileUnet whose decoder stages exchange information via
    SKNet-style ``DistractionConv`` attention after each upsampling step.

    The parent's Sequential decoder stages are replaced attribute by
    attribute with ``InvertedResidualTransBlock`` upsamplers.
    NOTE(review): the parent's ``decoderh1``/``decoderh2`` Sequentials
    still reference the replaced modules, so those stay registered (and
    parameterized) even though ``forward`` never uses them — confirm this
    is intentional.
    """

    def __init__(self, in_channels, based_dim=32):
        super().__init__(in_channels, based_dim)
        # (input channels, output channels) for the five decoder levels.
        channel_plan = (
            (based_dim * 16, based_dim * 8),
            (based_dim * 8, based_dim * 4),
            (based_dim * 4, based_dim * 2),
            (based_dim * 2, based_dim),
            (based_dim, in_channels),
        )
        for level, (cin, cout) in enumerate(channel_plan, start=1):
            setattr(self, f'distract{level}', DistractionConv(cout))
            setattr(self, f'tconv{level}h1',
                    InvertedResidualTransBlock(cin, cout))
            setattr(self, f'tconv{level}h2',
                    InvertedResidualTransBlock(cin, cout))
        self._initialize_weights()

    def forward(self, combined_image):
        """Encode the (bs, c, h, w) batch once; both heads start from the
        same feature map and interact through DistractionConv attention
        after every upsampling level.
        """
        feat_a = feat_b = self.encoder(combined_image)
        stages = (
            (self.tconv1h1, self.tconv1h2, self.distract1),
            (self.tconv2h1, self.tconv2h2, self.distract2),
            (self.tconv3h1, self.tconv3h2, self.distract3),
            (self.tconv4h1, self.tconv4h2, self.distract4),
            (self.tconv5h1, self.tconv5h2, self.distract5),
        )
        for up_a, up_b, distract in stages:
            feat_a, feat_b = up_a(feat_a), up_b(feat_b)
            feat_a, feat_b = distract(feat_a, feat_b)
        return torch.cat((feat_a, feat_b), dim=1)
| 39,819 | 191 | 1,174 |
bd9fc6a2e9a1bd5460c3136bbc62956e2761bb63 | 1,604 | py | Python | examples/script.py | FelixBoelle/aiida-ase-basic | 25a89835b880e07e695a0fdef280be4319b23cb4 | [
"MIT"
] | null | null | null | examples/script.py | FelixBoelle/aiida-ase-basic | 25a89835b880e07e695a0fdef280be4319b23cb4 | [
"MIT"
] | 1 | 2019-11-06T18:10:56.000Z | 2019-11-06T18:10:56.000Z | examples/script.py | FelixBoelle/aiida-ase-basic | 25a89835b880e07e695a0fdef280be4319b23cb4 | [
"MIT"
] | 1 | 2019-11-07T08:44:47.000Z | 2019-11-07T08:44:47.000Z | """
Test script that uses ASE to run an EMT calculation
Script partly from the ASE intro tutorials
https://wiki.fysik.dtu.dk/ase/tutorials/surface.html
"""
# --------------------- STEP 1: Prepare the atoms/structure object ------------
#from ase.build import fcc111
#h = 1.85
#d = 1.10
#atoms = fcc111('Cu', size=(4, 4, 2), vacuum=10.0)
#atoms.write('atoms_in.json', format='json')
from ase.io import read
atoms = read('atoms_in.json')
# ==================== START ASE SCRIPT to AiiDA ==============================
from ase.calculators.emt import EMT
from ase.optimize import FIRE
# -------------------- STEP 2: Attach the calculator --------------------------
calc = EMT(properties=['energy', 'stress'])
atoms.set_calculator(calc)
# -------------------- STEP 3: run the dynamics -------------------------------
# write optimizer steps to logfile
dyn = FIRE(atoms, trajectory='Cu111.traj', logfile='FIRE.log')
dyn.run(fmax=0.05)
# -------------------- STEP 4: Extract and save results -----------------------
results = {}
results['potential_energy'] = atoms.get_potential_energy()
results['stress'] = atoms.get_stress()
print('potential energy: ', results['potential_energy'])
print('stress: ', results['stress'])
# ==================== END ASE SCRIPT to AiiDA ================================
# NEED TO STORE
# writes last step of optimization to a .json file that is storable in a db
atoms.write('atoms_out.json', format='json') # to store 1
# FIRE.log file for reference - to store 2
# results as entry in database - to store 3
# reference to Cu111.traj file for provenance - to store 4
| 32.734694 | 79 | 0.597257 | """
Test script that uses ASE to run an EMT calculation
Script partly from the ASE intro tutorials
https://wiki.fysik.dtu.dk/ase/tutorials/surface.html
"""
# --------------------- STEP 1: Prepare the atoms/structure object ------------
#from ase.build import fcc111
#h = 1.85
#d = 1.10
#atoms = fcc111('Cu', size=(4, 4, 2), vacuum=10.0)
#atoms.write('atoms_in.json', format='json')
from ase.io import read
atoms = read('atoms_in.json')
# ==================== START ASE SCRIPT to AiiDA ==============================
from ase.calculators.emt import EMT
from ase.optimize import FIRE
# -------------------- STEP 2: Attach the calculator --------------------------
calc = EMT(properties=['energy', 'stress'])
atoms.set_calculator(calc)
# -------------------- STEP 3: run the dynamics -------------------------------
# write optimizer steps to logfile
dyn = FIRE(atoms, trajectory='Cu111.traj', logfile='FIRE.log')
dyn.run(fmax=0.05)
# -------------------- STEP 4: Extract and save results -----------------------
results = {}
results['potential_energy'] = atoms.get_potential_energy()
results['stress'] = atoms.get_stress()
print('potential energy: ', results['potential_energy'])
print('stress: ', results['stress'])
# ==================== END ASE SCRIPT to AiiDA ================================
# NEED TO STORE
# writes last step of optimization to a .json file that is storable in a db
atoms.write('atoms_out.json', format='json') # to store 1
# FIRE.log file for reference - to store 2
# results as entry in database - to store 3
# reference to Cu111.traj file for provenance - to store 4
| 0 | 0 | 0 |
b692793606989fb6d0b83138ad1b450a2f06bf18 | 510 | py | Python | app/board_app/migrations/0003_auto_20200708_1139.py | KimKiHyuk/BenefitObserver | 74d59ee2d9dc81f8b8423e14a9ce950fa21f332b | [
"MIT"
] | null | null | null | app/board_app/migrations/0003_auto_20200708_1139.py | KimKiHyuk/BenefitObserver | 74d59ee2d9dc81f8b8423e14a9ce950fa21f332b | [
"MIT"
] | 8 | 2021-03-30T13:53:18.000Z | 2022-03-02T14:54:13.000Z | app/board_app/migrations/0003_auto_20200708_1139.py | KimKiHyuk/BenefitObserver | 74d59ee2d9dc81f8b8423e14a9ce950fa21f332b | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-08 11:39
from django.db import migrations
| 23.181818 | 81 | 0.584314 | # Generated by Django 3.0.7 on 2020-07-08 11:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('board_app', '0002_auto_20200630_0938'),
]
operations = [
migrations.AlterModelOptions(
name='posts',
options={'ordering': ['updated_at'], 'verbose_name_plural': 'Posts'},
),
migrations.AlterModelOptions(
name='url',
options={'verbose_name_plural': 'Urls'},
),
]
| 0 | 404 | 23 |
d1262dcb778ce01b695f8d85addafbc5daac572e | 6,646 | py | Python | plyer/platforms/win/cpu.py | EdwardCoventry/plyer | 4002b21fe1a664e80b422547b8ae04d2a2d3037d | [
"MIT"
] | 1,184 | 2015-01-02T23:24:46.000Z | 2022-03-27T16:28:16.000Z | plyer/platforms/win/cpu.py | EdwardCoventry/plyer | 4002b21fe1a664e80b422547b8ae04d2a2d3037d | [
"MIT"
] | 469 | 2015-01-02T09:23:15.000Z | 2022-03-17T10:35:58.000Z | plyer/platforms/win/cpu.py | EdwardCoventry/plyer | 4002b21fe1a664e80b422547b8ae04d2a2d3037d | [
"MIT"
] | 431 | 2015-01-05T23:00:43.000Z | 2022-03-15T04:20:03.000Z | '''
Module of Windows API for plyer.cpu.
'''
from ctypes import (
c_ulonglong, c_ulong, byref,
Structure, POINTER, Union, windll, create_string_buffer,
sizeof, cast, c_void_p, c_uint32
)
from ctypes.wintypes import (
BYTE, DWORD, WORD
)
from plyer.facades import CPU
KERNEL = windll.kernel32
ERROR_INSUFFICIENT_BUFFER = 0x0000007A
class CacheType:
'''
Win API PROCESSOR_CACHE_TYPE enum.
'''
unified = 0
instruction = 1
data = 2
trace = 3
class RelationshipType:
'''
Win API LOGICAL_PROCESSOR_RELATIONSHIP enum.
'''
processor_core = 0 # logical proc sharing single core
numa_node = 1 # logical proc sharing single NUMA node
cache = 2 # logical proc sharing cache
processor_package = 3 # logical proc sharing physical package
group = 4 # logical proc sharing processor group
all = 0xffff # logical proc info for all groups
class CacheDescriptor(Structure):
'''
Win API CACHE_DESCRIPTOR struct.
'''
_fields_ = [
('Level', BYTE),
('Associativity', BYTE),
('LineSize', WORD),
('Size', DWORD),
('Type', DWORD)
]
class ProcessorCore(Structure):
'''
Win API ProcessorCore struct.
'''
_fields_ = [('Flags', BYTE)]
class NumaNode(Structure):
'''
Win API NumaNode struct.
'''
_fields_ = [('NodeNumber', DWORD)]
class SystemLPIUnion(Union):
'''
Win API SYSTEM_LOGICAL_PROCESSOR_INFORMATION union without name.
'''
_fields_ = [
('ProcessorCore', ProcessorCore),
('NumaNode', NumaNode),
('Cache', CacheDescriptor),
('Reserved', c_ulonglong)
]
class SystemLPI(Structure):
'''
Win API SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct.
'''
_fields_ = [
('ProcessorMask', c_ulong),
('Relationship', c_ulong),
('LPI', SystemLPIUnion)
]
class WinCPU(CPU):
'''
Implementation of Windows CPU API.
'''
@staticmethod
def instance():
'''
Instance for facade proxy.
'''
return WinCPU()
# Resources:
# GetLogicalProcessInformation
# https://msdn.microsoft.com/en-us/library/ms683194(v=vs.85).aspx
# SYSTEM_LOGICAL_PROCESSOR_INFORMATION
# https://msdn.microsoft.com/en-us/library/ms686694(v=vs.85).aspx
# LOGICAL_PROCESSOR_RELATIONSHIP enum (0 - 4, 0xffff)
# https://msdn.microsoft.com/2ada52f0-70ec-4146-9ef7-9af3b08996f9
# CACHE_DESCRIPTOR struct
# https://msdn.microsoft.com/38cfa605-831c-45ef-a99f-55f42b2b56e9
# PROCESSOR_CACHE_TYPE
# https://msdn.microsoft.com/23044f67-e944-43c2-8c75-3d2fba87cb3c
# C example
# https://msdn.microsoft.com/en-us/904d2d35-f419-4e8f-a689-f39ed926644c
| 26.268775 | 75 | 0.592838 | '''
Module of Windows API for plyer.cpu.
'''
from ctypes import (
c_ulonglong, c_ulong, byref,
Structure, POINTER, Union, windll, create_string_buffer,
sizeof, cast, c_void_p, c_uint32
)
from ctypes.wintypes import (
BYTE, DWORD, WORD
)
from plyer.facades import CPU
KERNEL = windll.kernel32
ERROR_INSUFFICIENT_BUFFER = 0x0000007A
class CacheType:
'''
Win API PROCESSOR_CACHE_TYPE enum.
'''
unified = 0
instruction = 1
data = 2
trace = 3
class RelationshipType:
'''
Win API LOGICAL_PROCESSOR_RELATIONSHIP enum.
'''
processor_core = 0 # logical proc sharing single core
numa_node = 1 # logical proc sharing single NUMA node
cache = 2 # logical proc sharing cache
processor_package = 3 # logical proc sharing physical package
group = 4 # logical proc sharing processor group
all = 0xffff # logical proc info for all groups
class CacheDescriptor(Structure):
'''
Win API CACHE_DESCRIPTOR struct.
'''
_fields_ = [
('Level', BYTE),
('Associativity', BYTE),
('LineSize', WORD),
('Size', DWORD),
('Type', DWORD)
]
class ProcessorCore(Structure):
'''
Win API ProcessorCore struct.
'''
_fields_ = [('Flags', BYTE)]
class NumaNode(Structure):
'''
Win API NumaNode struct.
'''
_fields_ = [('NodeNumber', DWORD)]
class SystemLPIUnion(Union):
'''
Win API SYSTEM_LOGICAL_PROCESSOR_INFORMATION union without name.
'''
_fields_ = [
('ProcessorCore', ProcessorCore),
('NumaNode', NumaNode),
('Cache', CacheDescriptor),
('Reserved', c_ulonglong)
]
class SystemLPI(Structure):
'''
Win API SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct.
'''
_fields_ = [
('ProcessorMask', c_ulong),
('Relationship', c_ulong),
('LPI', SystemLPIUnion)
]
class WinCPU(CPU):
'''
Implementation of Windows CPU API.
'''
@staticmethod
def _countbits(mask):
# make sure the correct ULONG_PTR size is used on 64bit
# https://docs.microsoft.com/en-us/windows/
# desktop/WinProg/windows-data-types
# note: not a pointer per-se, != PULONG_PTR
ulong_ptr = c_ulonglong if sizeof(c_void_p) == 8 else c_ulong
# note: c_ulonglong only on 64bit, otherwise c_ulong
# DWORD == c_uint32
# https://docs.microsoft.com/en-us/windows/
# desktop/WinProg/windows-data-types
lshift = c_uint32(sizeof(ulong_ptr) * 8 - 1)
assert lshift.value in (31, 63), lshift # 32 or 64 bits - 1
lshift = lshift.value
test = 1 << lshift
assert test % 2 == 0, test
count = 0
i = 0
while i <= lshift:
i += 1
# do NOT remove!!!
# test value has to be %2 == 0,
# except the last case where the value is 1,
# so that int(test) == int(float(test))
# and the mask bit is counted correctly
assert test % 2 == 0 or float(test) == 1.0, test
# https://stackoverflow.com/a/1746642/5994041
# note: useful to print(str(bin(int(...)))[2:])
count += 1 if (mask & int(test)) else 0
test /= 2
return count
def _logprocinfo(self, relationship):
get_logical_process_info = KERNEL.GetLogicalProcessorInformation
# first call with no structure to get the real size of the required
buff_length = c_ulong(0)
result = get_logical_process_info(None, byref(buff_length))
assert not result, result
error = KERNEL.GetLastError()
assert error == ERROR_INSUFFICIENT_BUFFER, error
assert buff_length, buff_length
# create buffer from the real winapi buffer length
buff = create_string_buffer(buff_length.value)
# call again with buffer pointer + the same length as arguments
result = get_logical_process_info(buff, byref(buff_length))
assert result, (result, KERNEL.GetLastError())
# memory size of one LPI struct in the array of LPI structs
offset = sizeof(SystemLPI) # ok
values = {
key: 0 for key in (
'relationship', 'mask',
'L1', 'L2', 'L3'
)
}
for i in range(0, buff_length.value, offset):
slpi = cast(
buff[i: i + offset],
POINTER(SystemLPI)
).contents
if slpi.Relationship != relationship:
continue
values['relationship'] += 1
values['mask'] += self._countbits(slpi.ProcessorMask)
if slpi.LPI.Cache.Level == 1:
values['L1'] += 1
elif slpi.LPI.Cache.Level == 2:
values['L2'] += 1
elif slpi.LPI.Cache.Level == 3:
values['L3'] += 1
return values
def _sockets(self):
# physical CPU sockets (or slots) on motherboard
return self._logprocinfo(
RelationshipType.processor_package
)['relationship']
def _physical(self):
# cores
return self._logprocinfo(
RelationshipType.processor_core
)['relationship']
def _logical(self):
# cores * threads
# if hyperthreaded core -> more than one logical processor
return self._logprocinfo(
RelationshipType.processor_core
)['mask']
def _cache(self):
# L1, L2, L3 cache count
result = self._logprocinfo(
RelationshipType.cache
)
return {
key: result[key]
for key in result
if key in ('L1', 'L2', 'L3')
}
def _numa(self):
# numa nodes
return self._logprocinfo(
RelationshipType.numa_node
)['relationship']
def instance():
'''
Instance for facade proxy.
'''
return WinCPU()
# Resources:
# GetLogicalProcessInformation
# https://msdn.microsoft.com/en-us/library/ms683194(v=vs.85).aspx
# SYSTEM_LOGICAL_PROCESSOR_INFORMATION
# https://msdn.microsoft.com/en-us/library/ms686694(v=vs.85).aspx
# LOGICAL_PROCESSOR_RELATIONSHIP enum (0 - 4, 0xffff)
# https://msdn.microsoft.com/2ada52f0-70ec-4146-9ef7-9af3b08996f9
# CACHE_DESCRIPTOR struct
# https://msdn.microsoft.com/38cfa605-831c-45ef-a99f-55f42b2b56e9
# PROCESSOR_CACHE_TYPE
# https://msdn.microsoft.com/23044f67-e944-43c2-8c75-3d2fba87cb3c
# C example
# https://msdn.microsoft.com/en-us/904d2d35-f419-4e8f-a689-f39ed926644c
| 3,716 | 0 | 188 |
7259792742b0ad4d459082903d93acb6d50ac521 | 735 | py | Python | gaussian.py | santosmv/probability-distributions | 7e21069a6998897d996dec4e8bafc775b2fe0098 | [
"MIT"
] | null | null | null | gaussian.py | santosmv/probability-distributions | 7e21069a6998897d996dec4e8bafc775b2fe0098 | [
"MIT"
] | null | null | null | gaussian.py | santosmv/probability-distributions | 7e21069a6998897d996dec4e8bafc775b2fe0098 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from distributions import gaussian
mu_list = [0, -5, 3]
sigma_list = [1, 2, 4]
x = np.linspace(-15, 15, 500)
for i in range(len(mu_list)):
mu = mu_list[i]
sigma = sigma_list[i]
plt.plot(x, gaussian(x, mu, sigma), label=r'$\mu = %d$'%mu + r' $\sigma = %d$'%sigma)
markerline, stemlines, baseline = plt.stem(mu_list, gaussian(np.array(mu_list), np.array(mu_list), np.array(sigma_list)), '--', label='Expected value')
plt.setp(stemlines, 'color', 'gainsboro')
plt.setp(markerline, 'color', 'silver')
plt.setp(baseline, visible=False)
plt.xlabel('x', fontsize=15)
plt.ylabel(r'$P(x, \mu, \sigma)$', fontsize=15)
plt.xlim(-15,15)
plt.ylim(0,0.5)
plt.legend()
plt.show() | 28.269231 | 151 | 0.668027 | import numpy as np
import matplotlib.pyplot as plt
from distributions import gaussian
mu_list = [0, -5, 3]
sigma_list = [1, 2, 4]
x = np.linspace(-15, 15, 500)
for i in range(len(mu_list)):
mu = mu_list[i]
sigma = sigma_list[i]
plt.plot(x, gaussian(x, mu, sigma), label=r'$\mu = %d$'%mu + r' $\sigma = %d$'%sigma)
markerline, stemlines, baseline = plt.stem(mu_list, gaussian(np.array(mu_list), np.array(mu_list), np.array(sigma_list)), '--', label='Expected value')
plt.setp(stemlines, 'color', 'gainsboro')
plt.setp(markerline, 'color', 'silver')
plt.setp(baseline, visible=False)
plt.xlabel('x', fontsize=15)
plt.ylabel(r'$P(x, \mu, \sigma)$', fontsize=15)
plt.xlim(-15,15)
plt.ylim(0,0.5)
plt.legend()
plt.show() | 0 | 0 | 0 |
f1127d1ceb6b0f1debf418a6f19cebc92d82130c | 972 | py | Python | utils.py | ThomasRanvier/autoencoders_and_gans | cab05522d847c512cdfea4b853f454d2fdd80581 | [
"MIT"
] | 1 | 2021-02-04T21:50:24.000Z | 2021-02-04T21:50:24.000Z | utils.py | ThomasRanvier/image_restoration | cab05522d847c512cdfea4b853f454d2fdd80581 | [
"MIT"
] | null | null | null | utils.py | ThomasRanvier/image_restoration | cab05522d847c512cdfea4b853f454d2fdd80581 | [
"MIT"
] | null | null | null | import datasets
import matplotlib.pyplot as plt
| 31.354839 | 110 | 0.688272 | import datasets
import matplotlib.pyplot as plt
def _raise(ex):
raise NotImplementedError(ex)
def load_dataset(dataset_name, batch_size=128):
datasets_switch = {
'mnist': datasets.load_mnist,
'noisy_mnist': datasets.load_noisy_mnist,
'fashion_mnist': datasets.load_fashion_mnist,
'noisy_fashion_mnist': datasets.load_noisy_fashion_mnist,
'cifar': datasets.load_cifar,
'noisy_cifar': datasets.load_noisy_cifar,
'lfw': datasets.load_lfw,
'blurry_lfw': datasets.load_blurry_lfw,
'bsds500': datasets.load_bsds500,
}
return datasets_switch.get(dataset_name, lambda x: _raise(f'Dataset {dataset_name} unknown!'))(batch_size)
def plot_losses(losses, path, title='Evaluation losses'):
fig = plt.figure()
plt.plot([i + 1 for i in range(len(losses))], losses, 'red')
plt.title(title)
plt.xlabel('epochs')
plt.ylabel('loss')
plt.tight_layout()
plt.savefig(path) | 853 | 0 | 69 |
ebe2a5ddb21e51192a8434ce010c70d3fbda8c69 | 6,271 | py | Python | tests/netcdf_engine/test_convert_multifragments.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 12 | 2021-06-07T16:51:32.000Z | 2022-03-10T12:48:00.000Z | tests/netcdf_engine/test_convert_multifragments.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 72 | 2021-04-28T21:49:41.000Z | 2022-02-24T13:58:11.000Z | tests/netcdf_engine/test_convert_multifragments.py | TileDB-Inc/TileDB-CF-Py | 9aab0fe9ba7346a1846c7458a5d08b123dcf90a8 | [
"MIT"
] | 3 | 2021-08-11T16:33:37.000Z | 2021-12-01T20:31:12.000Z | # Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
import tiledb
from tiledb.cf import Group, NetCDF4ConverterEngine
netCDF4 = pytest.importorskip("netCDF4")
class TestSimplyCopyChunks:
    """Test converting a simple NetCDF in chunks.
    NetCDF File:
    dimensions:
        x (8)
        y (8)
        z (8)
    variables:
        f (x, y, z) = reshape([0, ..., 511], (8, 8, 8))
    """
    # Dense reference data for variable "f": the integers 0..511 in an 8x8x8 cube.
    attr_data = np.reshape(np.arange(512), (8, 8, 8))
    @pytest.fixture(scope="class")
    def netcdf_file(self, tmpdir_factory):
        """Returns the NetCDF file that will be used to test the conversion."""
        # class-scoped: the file is created once and shared by every test below.
        filepath = tmpdir_factory.mktemp("input_file").join("simple_copy_chunks.nc")
        with netCDF4.Dataset(filepath, mode="w") as dataset:
            dataset.createDimension("x", 8)
            dataset.createDimension("y", 8)
            dataset.createDimension("z", 8)
            var = dataset.createVariable(
                varname="f", datatype=np.int64, dimensions=("x", "y", "z")
            )
            var[:, :, :] = self.attr_data
        return filepath
    @pytest.mark.parametrize(
        "sparse,expected_result", ((False, attr_data), (True, np.arange(512)))
    )
    def test_convert_chunks(self, netcdf_file, tmpdir, sparse, expected_result):
        """Test copying NetCDF file in chunks for a simple NetCDF file."""
        uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        array_creator = converter.get_array_creator_by_attr("f")
        array_creator.sparse = sparse
        # Default is unchunked (one fragment per dimension); then request chunked copy.
        assert array_creator.domain_creator.max_fragment_shape == (None, None, None)
        array_creator.domain_creator.max_fragment_shape = (4, 8, 2)
        assert array_creator.domain_creator.max_fragment_shape == (4, 8, 2)
        converter.convert_to_group(uri)
        with Group(uri) as group:
            with group.open_array(attr="f") as array:
                array_uri = array.uri
                result = array[...]
        # Sparse reads return a dict of attribute buffers; dense reads return the array.
        result = result["f"] if isinstance(result, dict) else result
        np.testing.assert_equal(result, expected_result)
        # 8x8x8 domain split by (4, 8, 2) chunks -> 2*1*4 = 8 fragments expected.
        fragment_info = tiledb.FragmentInfoList(array_uri)
        assert len(fragment_info) == 8
    @pytest.mark.parametrize(
        "sparse,expected_result",
        ((False, np.reshape(np.arange(512), (8, 8, 8))), (True, np.arange(512))),
    )
    def test_convert_chunks_with_injected(
        self, netcdf_file, tmpdir, sparse, expected_result
    ):
        """Test copying NetCDF file in chunks for a simple NetCDF file with externally
        provided dimension and attribute values."""
        uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file)
        # Extra dimension "t" and extra attribute "g" do not exist in the NetCDF file;
        # their values are supplied directly to convert_to_group below.
        converter.add_shared_dim("t", domain=(0, 3), dtype=np.uint64)
        array_creator = converter.get_array_creator_by_attr("f")
        array_creator.sparse = sparse
        array_creator.add_attr_creator(name="g", dtype=np.float64)
        array_creator.domain_creator.inject_dim_creator("t", 0)
        array_creator.domain_creator.max_fragment_shape = (1, 4, 8, 2)
        # Define data for extra variable
        g_data = np.reshape(np.random.random_sample((512)), (1, 8, 8, 8))
        converter.convert_to_group(
            uri,
            assigned_dim_values={"t": 0},
            assigned_attr_values={"g": g_data},
        )
        with Group(uri) as group:
            with group.open_array("array0") as array:
                array_uri = array.uri
                result = array[0, :, :, :]
        f_result = result["f"]
        np.testing.assert_equal(f_result, expected_result)
        g_result = np.reshape(result["g"], (1, 8, 8, 8))
        np.testing.assert_equal(g_data, g_result)
        # Only t=0 is written; the 8x8x8 slab split by (4, 8, 2) again gives 8 fragments.
        fragment_info = tiledb.FragmentInfoList(array_uri)
        assert len(fragment_info) == 8
class TestCoordinateCopyChunks:
    """Test converting a simple NetCDF in chunks.
    NetCDF File:
    dimensions:
        x (8)
        y (8)
    variables:
        x (x) = linspace(-1, 1, 8)
        y (y) = linspace(0, 2, 8)
        f (x, y) = [[0, 1, ...],...,[...,62,63]]
    """
    # Coordinate values for dimensions "x" and "y", and reference data for "f".
    x_data = np.linspace(-1.0, 1.0, 8)
    y_data = np.linspace(0.0, 2.0, 8)
    attr_data = np.reshape(np.arange(64), (8, 8))
    @pytest.fixture(scope="class")
    def netcdf_file(self, tmpdir_factory):
        """Returns the NetCDF file that will be used to test the conversion."""
        filepath = tmpdir_factory.mktemp("input_file").join("simple_copy_chunks.nc")
        with netCDF4.Dataset(filepath, mode="w") as dataset:
            dataset.createDimension("x", 8)
            dataset.createDimension("y", 8)
            var = dataset.createVariable(
                varname="f", datatype=np.int64, dimensions=("x", "y")
            )
            var[:, :] = self.attr_data
            # NetCDF coordinate variables (same name as their dimension).
            var = dataset.createVariable(
                varname="x", datatype=np.float64, dimensions=("x")
            )
            var[:] = self.x_data
            var = dataset.createVariable(
                varname="y", datatype=np.float64, dimensions=("y")
            )
            var[:] = self.y_data
        return filepath
    def test_convert_chunks(self, netcdf_file, tmpdir):
        """Test copying NetCDF file in chunks for a NetCDF to TileDB conversion that
        maps NetCDF coordinates to dimensions."""
        uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
        converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=True)
        # Real-valued dimensions need an explicit domain covering the coordinate data.
        converter.get_shared_dim("x").domain = (-1.0, 1.0)
        converter.get_shared_dim("y").domain = (0.0, 2.0)
        array_creator = converter.get_array_creator_by_attr("f")
        array_creator.domain_creator.max_fragment_shape = (4, 4)
        converter.convert_to_group(uri)
        with Group(uri) as group:
            with group.open_array(attr="f") as array:
                array_uri = array.uri
                result = array[...]
        result = result["f"]
        expected_result = np.arange(64)
        np.testing.assert_equal(result, expected_result)
        # 8x8 coordinate grid split by (4, 4) chunks -> 2*2 = 4 fragments expected.
        fragment_info = tiledb.FragmentInfoList(array_uri)
        assert len(fragment_info) == 4
| 39.440252 | 86 | 0.614894 | # Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
import tiledb
from tiledb.cf import Group, NetCDF4ConverterEngine
netCDF4 = pytest.importorskip("netCDF4")
class TestSimplyCopyChunks:
"""Test converting a simple NetCDF in chunks.
NetCDF File:
dimensions:
x (8)
y (8)
z (8)
variables:
f (x, y, z) = reshape([0, ..., 511], (8, 8, 8))
"""
attr_data = np.reshape(np.arange(512), (8, 8, 8))
@pytest.fixture(scope="class")
def netcdf_file(self, tmpdir_factory):
"""Returns the NetCDF file that will be used to test the conversion."""
filepath = tmpdir_factory.mktemp("input_file").join("simple_copy_chunks.nc")
with netCDF4.Dataset(filepath, mode="w") as dataset:
dataset.createDimension("x", 8)
dataset.createDimension("y", 8)
dataset.createDimension("z", 8)
var = dataset.createVariable(
varname="f", datatype=np.int64, dimensions=("x", "y", "z")
)
var[:, :, :] = self.attr_data
return filepath
@pytest.mark.parametrize(
"sparse,expected_result", ((False, attr_data), (True, np.arange(512)))
)
def test_convert_chunks(self, netcdf_file, tmpdir, sparse, expected_result):
"""Test copying NetCDF file in chunks for a simple NetCDF file."""
uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
converter = NetCDF4ConverterEngine.from_file(netcdf_file)
array_creator = converter.get_array_creator_by_attr("f")
array_creator.sparse = sparse
assert array_creator.domain_creator.max_fragment_shape == (None, None, None)
array_creator.domain_creator.max_fragment_shape = (4, 8, 2)
assert array_creator.domain_creator.max_fragment_shape == (4, 8, 2)
converter.convert_to_group(uri)
with Group(uri) as group:
with group.open_array(attr="f") as array:
array_uri = array.uri
result = array[...]
result = result["f"] if isinstance(result, dict) else result
np.testing.assert_equal(result, expected_result)
fragment_info = tiledb.FragmentInfoList(array_uri)
assert len(fragment_info) == 8
@pytest.mark.parametrize(
"sparse,expected_result",
((False, np.reshape(np.arange(512), (8, 8, 8))), (True, np.arange(512))),
)
def test_convert_chunks_with_injected(
self, netcdf_file, tmpdir, sparse, expected_result
):
"""Test copying NetCDF file in chunks for a simple NetCDF file with externally
provided dimension and attribute values."""
uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
converter = NetCDF4ConverterEngine.from_file(netcdf_file)
converter.add_shared_dim("t", domain=(0, 3), dtype=np.uint64)
array_creator = converter.get_array_creator_by_attr("f")
array_creator.sparse = sparse
array_creator.add_attr_creator(name="g", dtype=np.float64)
array_creator.domain_creator.inject_dim_creator("t", 0)
array_creator.domain_creator.max_fragment_shape = (1, 4, 8, 2)
# Define data for extra variable
g_data = np.reshape(np.random.random_sample((512)), (1, 8, 8, 8))
converter.convert_to_group(
uri,
assigned_dim_values={"t": 0},
assigned_attr_values={"g": g_data},
)
with Group(uri) as group:
with group.open_array("array0") as array:
array_uri = array.uri
result = array[0, :, :, :]
f_result = result["f"]
np.testing.assert_equal(f_result, expected_result)
g_result = np.reshape(result["g"], (1, 8, 8, 8))
np.testing.assert_equal(g_data, g_result)
fragment_info = tiledb.FragmentInfoList(array_uri)
assert len(fragment_info) == 8
class TestCoordinateCopyChunks:
"""Test converting a simple NetCDF in chunks.
NetCDF File:
dimensions:
x (8)
y (8)
variables:
x (x) = linspace(-1, 1, 8)
y (y) = linspace(0, 2, 8)
f (x, y) = [[0, 1, ...],...,[...,62,63]]
"""
x_data = np.linspace(-1.0, 1.0, 8)
y_data = np.linspace(0.0, 2.0, 8)
attr_data = np.reshape(np.arange(64), (8, 8))
@pytest.fixture(scope="class")
def netcdf_file(self, tmpdir_factory):
"""Returns the NetCDF file that will be used to test the conversion."""
filepath = tmpdir_factory.mktemp("input_file").join("simple_copy_chunks.nc")
with netCDF4.Dataset(filepath, mode="w") as dataset:
dataset.createDimension("x", 8)
dataset.createDimension("y", 8)
var = dataset.createVariable(
varname="f", datatype=np.int64, dimensions=("x", "y")
)
var[:, :] = self.attr_data
var = dataset.createVariable(
varname="x", datatype=np.float64, dimensions=("x")
)
var[:] = self.x_data
var = dataset.createVariable(
varname="y", datatype=np.float64, dimensions=("y")
)
var[:] = self.y_data
return filepath
def test_convert_chunks(self, netcdf_file, tmpdir):
"""Test copying NetCDF file in chunks for a NetCDF to TileDB conversion that
maps NetCDF coordinates to dimensions."""
uri = str(tmpdir.mkdir("output").join("simple_copy_chunks"))
converter = NetCDF4ConverterEngine.from_file(netcdf_file, coords_to_dims=True)
converter.get_shared_dim("x").domain = (-1.0, 1.0)
converter.get_shared_dim("y").domain = (0.0, 2.0)
array_creator = converter.get_array_creator_by_attr("f")
array_creator.domain_creator.max_fragment_shape = (4, 4)
converter.convert_to_group(uri)
with Group(uri) as group:
with group.open_array(attr="f") as array:
array_uri = array.uri
result = array[...]
result = result["f"]
expected_result = np.arange(64)
np.testing.assert_equal(result, expected_result)
fragment_info = tiledb.FragmentInfoList(array_uri)
assert len(fragment_info) == 4
| 0 | 0 | 0 |
ada7701a7425979917dc464f79038b2b74c9fb6d | 14,117 | py | Python | mi/dataset/driver/ctdpf_ckl/wfp/test/test_driver.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/driver/ctdpf_ckl/wfp/test/test_driver.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/driver/ctdpf_ckl/wfp/test/test_driver.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | """
@package mi.dataset.driver.ctdpf_ckl.wfp.test.test_driver
@file marine-integrations/mi/dataset/driver/ctdpf_ckl/wfp/driver.py
@author cgoodrich
@brief Test cases for ctdpf_ckl_wfp driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'cgoodrich'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mock import Mock
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.exceptions import SampleTimeout
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.driver.ctdpf_ckl.wfp.driver import CtdpfCklWfpDataSetDriver
from mi.dataset.parser.ctdpf_ckl_wfp import CtdpfCklWfpParserDataParticle, DataParticleType
from mi.dataset.parser.wfp_c_file_common import StateKey
# Fill in driver details
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.ctdpf_ckl.wfp.driver',
driver_class='CtdpfCklWfpDataSetDriver',
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = CtdpfCklWfpDataSetDriver.stream_config(),
startup_config = {
DataSourceConfigKey.RESOURCE_ID: 'ctdpf_ckl_wfp',
DataSourceConfigKey.HARVESTER:
{
DataSetDriverConfigKeys.DIRECTORY: '/tmp/dsatest',
DataSetDriverConfigKeys.PATTERN: 'C*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataSourceConfigKey.PARSER: {}
}
)
# The integration and qualification tests generated here are suggested tests,
# but may not be enough to fully test your driver. Additional tests should be
# written as needed.
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi') | 39.543417 | 122 | 0.6393 | """
@package mi.dataset.driver.ctdpf_ckl.wfp.test.test_driver
@file marine-integrations/mi/dataset/driver/ctdpf_ckl/wfp/driver.py
@author cgoodrich
@brief Test cases for ctdpf_ckl_wfp driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/dsa/test_driver
$ bin/dsa/test_driver -i [-t testname]
$ bin/dsa/test_driver -q [-t testname]
"""
__author__ = 'cgoodrich'
__license__ = 'Apache 2.0'
import unittest
from nose.plugins.attrib import attr
from mock import Mock
from pyon.agent.agent import ResourceAgentState
from interface.objects import ResourceAgentErrorEvent
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.exceptions import SampleTimeout
from mi.idk.dataset.unit_test import DataSetTestCase
from mi.idk.dataset.unit_test import DataSetIntegrationTestCase
from mi.idk.dataset.unit_test import DataSetQualificationTestCase
from mi.dataset.dataset_driver import DriverParameter
from mi.dataset.dataset_driver import DataSourceConfigKey, DataSetDriverConfigKeys
from mi.dataset.driver.ctdpf_ckl.wfp.driver import CtdpfCklWfpDataSetDriver
from mi.dataset.parser.ctdpf_ckl_wfp import CtdpfCklWfpParserDataParticle, DataParticleType
from mi.dataset.parser.wfp_c_file_common import StateKey
# Fill in driver details
DataSetTestCase.initialize(
driver_module='mi.dataset.driver.ctdpf_ckl.wfp.driver',
driver_class='CtdpfCklWfpDataSetDriver',
agent_resource_id = '123xyz',
agent_name = 'Agent007',
agent_packet_config = CtdpfCklWfpDataSetDriver.stream_config(),
startup_config = {
DataSourceConfigKey.RESOURCE_ID: 'ctdpf_ckl_wfp',
DataSourceConfigKey.HARVESTER:
{
DataSetDriverConfigKeys.DIRECTORY: '/tmp/dsatest',
DataSetDriverConfigKeys.PATTERN: 'C*.DAT',
DataSetDriverConfigKeys.FREQUENCY: 1,
},
DataSourceConfigKey.PARSER: {}
}
)
# The integration and qualification tests generated here are suggested tests,
# but may not be enough to fully test your driver. Additional tests should be
# written as needed.
###############################################################################
# INTEGRATION TESTS #
# Device specific integration tests are for #
# testing device specific capabilities #
###############################################################################
@attr('INT', group='mi')
class IntegrationTest(DataSetIntegrationTestCase):
def test_get(self):
"""
Test that we can get data from files. Verify that the driver
sampling can be started and stopped
"""
self.clear_sample_data()
# Start sampling and watch for an exception
self.driver.start_sampling()
self.clear_async_data()
self.create_sample_data('first.DAT', "C0000001.DAT")
self.assert_data(None, 'first.result.yml', count=4, timeout=10)
self.clear_async_data()
self.create_sample_data('second.DAT', "C0000002.DAT")
self.assert_data(None, 'second.result.yml', count=7, timeout=10)
def test_stop_resume(self):
"""
Test the ability to stop and restart the process
"""
path_1 = self.create_sample_data('first.DAT', "C0000001.DAT")
path_2 = self.create_sample_data('second.DAT', "C0000002.DAT")
# Create and store the new driver state
state = {
'C0000001.DAT': self.get_file_state(path_1, True, 33),
'C0000002.DAT': self.get_file_state(path_2, False, 33)
}
# only the position field in the parser state is initialized in get_file_state, need to add the other state fields
state['C0000001.DAT']['parser_state'][StateKey.RECORDS_READ] = 3
state['C0000001.DAT']['parser_state'][StateKey.METADATA_SENT] = True
state['C0000002.DAT']['parser_state'][StateKey.RECORDS_READ] = 3
state['C0000002.DAT']['parser_state'][StateKey.METADATA_SENT] = True
self.driver = self._get_driver_object(memento=state)
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
# verify data is produced
self.assert_data(None, 'partial_second.result.yml', count=3, timeout=10)
def test_stop_start_resume(self):
"""
Test the ability to stop and restart sampling, ingesting files in the
correct order
"""
# create some data to parse
self.clear_async_data()
self.driver.start_sampling()
self.create_sample_data('first.DAT', "C0000001.DAT")
self.create_sample_data('second.DAT', "C0000002.DAT")
self.assert_data(None, 'first.result.yml', count=4, timeout=10)
self.assert_file_ingested("C0000001.DAT")
self.assert_file_not_ingested("C0000002.DAT")
self.driver.stop_sampling()
self.driver.start_sampling()
self.assert_data(None, 'second.result.yml', count=7, timeout=10)
self.assert_file_ingested("C0000002.DAT")
def test_sample_exception_empty(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs. In this case an empty file will produce a sample exception.
"""
self.clear_async_data()
config = self._driver_config()['startup_config']['harvester']['pattern']
filename = config.replace("*", "foo")
self.create_sample_data(filename)
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested(filename)
def test_sample_exception_num_samples(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs. In this case an empty file will produce a sample exception.
"""
self.clear_async_data()
self.create_sample_data('bad_num_samples.DAT', 'C0000001.DAT')
# Start sampling and watch for an exception
self.driver.start_sampling()
# an event catches the sample exception
self.assert_event('ResourceAgentErrorEvent')
self.assert_file_ingested('C0000001.DAT')
def test_timestamp_only(self):
"""
Test a case that should produce a sample exception and confirm the
sample exception occurs. In this case an empty file will produce a sample exception.
"""
self.clear_async_data()
self.create_sample_data('ts_only.DAT', 'C0000001.DAT')
# Start sampling and watch for an exception
self.driver.start_sampling()
self.assert_data(None, 'ts_only.result.yml', count=1, timeout=10)
self.assert_file_ingested('C0000001.DAT')
def test_error(self):
self.create_sample_data('C0000034.DAT')
self.driver.start_sampling()
self.assert_data(None, count=4, timeout=10)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for #
# testing device specific capabilities #
###############################################################################
@attr('QUAL', group='mi')
class QualificationTest(DataSetQualificationTestCase):
def assert_all_queue_empty(self):
"""
Assert the sample queue for all 3 data streams is empty
"""
self.assert_sample_queue_size(DataParticleType.METADATA, 0)
self.assert_sample_queue_size(DataParticleType.DATA, 0)
def test_publish_path(self):
"""
Setup an agent/driver/harvester/parser and verify that data is
published out the agent
"""
self.create_sample_data('first.DAT', 'C0000001.DAT')
self.assert_initialize()
# Verify we get one sample
try:
result = self.data_subscribers.get_samples(DataParticleType.METADATA, 1)
log.debug("First RESULT: %s", result)
result_2 = self.data_subscribers.get_samples(DataParticleType.DATA, 3)
log.debug("Second RESULT: %s", result_2)
result.extend(result_2)
log.debug("Extended RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'first.result.yml')
except Exception as e:
log.error("Exception trapped: %s", e)
self.fail("Sample timeout.")
def test_large_import(self):
"""
Test importing a large number of samples from the file at once
"""
self.create_sample_data('C0000038.DAT')
self.assert_initialize()
# get results for each of the data particle streams
result1 = self.get_samples(DataParticleType.METADATA,1,10)
result2 = self.get_samples(DataParticleType.DATA,270,40)
def test_stop_start(self):
"""
Test the agents ability to start data flowing, stop, then restart
at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data('first.DAT', "C0000001.DAT")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
try:
# Read the first file and verify the data
result = self.get_samples(DataParticleType.METADATA)
result2 = self.get_samples(DataParticleType.DATA, 3)
result.extend(result2)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'first.result.yml')
self.assert_all_queue_empty()
self.create_sample_data('second.DAT', "C0000002.DAT")
# Now read the first three records (1 metadata, 2 data) of the second file then stop
result = self.get_samples(DataParticleType.METADATA)
result2 = self.get_samples(DataParticleType.DATA, 2)
result.extend(result2)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# Restart sampling and ensure we get the last 4 records of the file
self.assert_start_sampling()
result3 = self.get_samples(DataParticleType.DATA, 4)
log.debug("got result 2 %s", result3)
result.extend(result3)
self.assert_data_values(result, 'second.result.yml')
self.assert_all_queue_empty()
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_shutdown_restart(self):
"""
Test a full stop of the dataset agent, then restart the agent
and confirm it restarts at the correct spot.
"""
log.info("CONFIG: %s", self._agent_config())
self.create_sample_data('first.DAT', "C0000001.DAT")
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Slow down processing to 1 per second to give us time to stop
self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})
self.assert_start_sampling()
try:
# Read the first file and verify the data
result = self.get_samples(DataParticleType.METADATA)
result2 = self.get_samples(DataParticleType.DATA, 3)
result.extend(result2)
log.debug("RESULT: %s", result)
# Verify values
self.assert_data_values(result, 'first.result.yml')
self.assert_all_queue_empty()
self.create_sample_data('second.DAT', "C0000002.DAT")
# Now read the first three records (1 metadata, 2 data) of the second file then stop
result = self.get_samples(DataParticleType.METADATA)
result2 = self.get_samples(DataParticleType.DATA, 2)
result.extend(result2)
log.debug("got result 1 %s", result)
self.assert_stop_sampling()
self.assert_all_queue_empty()
# stop the agent
self.stop_dataset_agent_client()
# re-start the agent
self.init_dataset_agent_client()
#re-initialize
self.assert_initialize(final_state=ResourceAgentState.COMMAND)
# Restart sampling and ensure we get the last 4 records of the file
self.assert_start_sampling()
result3 = self.get_samples(DataParticleType.DATA, 4)
log.debug("got result 2 %s", result3)
result.extend(result3)
self.assert_data_values(result, 'second.result.yml')
self.assert_all_queue_empty()
except SampleTimeout as e:
log.error("Exception trapped: %s", e, exc_info=True)
self.fail("Sample timeout.")
def test_parser_exception(self):
"""
Test an exception is raised after the driver is started during
record parsing.
"""
self.clear_sample_data()
self.create_sample_data('bad_num_samples.DAT', 'C0000001.DAT')
self.create_sample_data('first.DAT', 'C0000002.DAT')
self.assert_initialize()
self.event_subscribers.clear_events()
result = self.get_samples(DataParticleType.METADATA)
result1 = self.get_samples(DataParticleType.DATA, 3)
result.extend(result1)
self.assert_data_values(result, 'first.result.yml')
self.assert_all_queue_empty();
# Verify an event was raised and we are in our retry state
self.assert_event_received(ResourceAgentErrorEvent, 10)
self.assert_state_change(ResourceAgentState.STREAMING, 10) | 137 | 10,998 | 44 |
2e3208da4f7b896f8fbaa40603f80a8420f457dd | 804 | py | Python | interpretability/explanation_methods/__init__.py | moboehle/CoDA-Nets | 9f43f286d8a662078e21dc401a315e257da21c0d | [
"BSD-2-Clause",
"0BSD"
] | 25 | 2021-06-14T09:14:12.000Z | 2022-03-31T14:27:09.000Z | interpretability/explanation_methods/__init__.py | moboehle/B-cos | 5f9218f6773534c80367793d1cd767742869764a | [
"BSD-2-Clause",
"0BSD"
] | 1 | 2021-07-05T12:31:58.000Z | 2021-07-05T12:31:58.000Z | interpretability/explanation_methods/__init__.py | moboehle/CoDA-Nets | 9f43f286d8a662078e21dc401a315e257da21c0d | [
"BSD-2-Clause",
"0BSD"
] | 3 | 2021-07-10T09:37:54.000Z | 2022-02-18T21:02:20.000Z | from interpretability.explanation_methods.explainers.rise import RISE
from interpretability.explanation_methods.explainers.lime import Lime
from interpretability.explanation_methods.explainers.occlusion import Occlusion
from interpretability.explanation_methods.explainers.captum import GradCam, GB, IxG, Grad, DeepLIFT, IntGrad
from interpretability.explanation_methods.explanation_configs import explainer_configs
explainer_map = {
"Ours": lambda x: x,
"RISE": RISE,
"Occlusion": Occlusion,
"GCam": GradCam,
"LIME": Lime,
"IntGrad": IntGrad,
"GB": GB,
"IxG": IxG,
"Grad": Grad,
"DeepLIFT": DeepLIFT
}
| 33.5 | 108 | 0.771144 | from interpretability.explanation_methods.explainers.rise import RISE
from interpretability.explanation_methods.explainers.lime import Lime
from interpretability.explanation_methods.explainers.occlusion import Occlusion
from interpretability.explanation_methods.explainers.captum import GradCam, GB, IxG, Grad, DeepLIFT, IntGrad
from interpretability.explanation_methods.explanation_configs import explainer_configs
explainer_map = {
"Ours": lambda x: x,
"RISE": RISE,
"Occlusion": Occlusion,
"GCam": GradCam,
"LIME": Lime,
"IntGrad": IntGrad,
"GB": GB,
"IxG": IxG,
"Grad": Grad,
"DeepLIFT": DeepLIFT
}
def get_explainer(trainer, explainer_name, config_name):
    """Instantiate the attribution method registered under `explainer_name`.

    Looks up the explainer class in `explainer_map` and builds it around the
    given `trainer` (model wrapper), using the keyword arguments stored in
    `explainer_configs[explainer_name][config_name]`.
    """
    return explainer_map[explainer_name](trainer, **explainer_configs[explainer_name][config_name])
| 135 | 0 | 23 |
4f9d0c16553dd37369561237a7161761ce921494 | 14,968 | py | Python | repartition_experiments/algorithms/utils.py | big-data-lab-team/repartition_experiments | 71cff469d2036f3e325ea3fc15b9f686c794f8d8 | [
"MIT"
] | null | null | null | repartition_experiments/algorithms/utils.py | big-data-lab-team/repartition_experiments | 71cff469d2036f3e325ea3fc15b9f686c794f8d8 | [
"MIT"
] | 1 | 2020-07-05T02:06:19.000Z | 2020-09-25T16:20:39.000Z | repartition_experiments/algorithms/utils.py | GTimothee/repartition_experiments | 71cff469d2036f3e325ea3fc15b9f686c794f8d8 | [
"MIT"
] | null | null | null | import operator, logging, math, psutil
from enum import Enum
from repartition_experiments.file_formats.hdf5 import HDF5_manager
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def get_volumes(R, B):
    """Partition R into buffers of shape B and name each buffer.

    Arguments:
    ----------
        R: shape of the original array
        B: buffer shape

    Returns:
    --------
        (partition, volumes): the buffer partition of R, and a dict mapping
        each numeric buffer index to a Volume holding its coordinates in R.
    """
    partition = get_partition(R, B)
    named = get_named_volumes(partition, B)
    return partition, named
def hypercubes_overlap(hypercube1, hypercube2):
    """Return True when the two hypercubes intersect on every axis.

    Raises:
    -------
        TypeError: if either argument is not a Volume.
    """
    if not (isinstance(hypercube1, Volume) and isinstance(hypercube2, Volume)):
        raise TypeError()
    low1, up1 = hypercube1.get_corners()
    low2, up2 = hypercube2.get_corners()
    # Two boxes are disjoint as soon as one lies entirely before the other
    # along any single axis; otherwise they overlap.
    return all(up1[d] > low2[d] and up2[d] > low1[d] for d in range(len(up1)))
def get_blocks_shape(big_array, small_array):
    """Return, per dimension, how many small arrays fit in the big array.

    Arguments:
    ----------
        big_array: shape of the enclosing array
        small_array: shape of one block

    Returns:
    --------
        tuple of per-dimension block counts
    """
    return tuple(int(big_dim / small_dim)
                 for big_dim, small_dim in zip(big_array, small_array))
def get_crossed_outfiles(buffer_of_interest, outfiles_volumes):
    """Return the output-file Volumes that cross the given buffer.

    Arguments:
    ----------
        buffer_of_interest: Volume of the buffer being processed
        outfiles_volumes: dict of Volumes representing the output files,
            indexed in storage order.

    Returns:
    --------
        list of Volume objects overlapping the buffer
    """
    return [outfile
            for outfile in outfiles_volumes.values()
            if hypercubes_overlap(buffer_of_interest, outfile)]
def merge_volumes(volume1, volume2):
    """Merge two volumes into their axis-aligned bounding box.

    Generalized from the previous hard-coded 3-D version: the result's lower
    corner is the elementwise minimum of both lower corners and its upper
    corner the elementwise maximum of both upper corners, for any number of
    dimensions. Behavior is unchanged for 3-D inputs.

    Returns:
    --------
        A new Volume (index '0_merged') covering both input volumes.

    Raises:
    -------
        TypeError: if either argument is not a Volume.
    """
    if not isinstance(volume1, Volume) or \
       not isinstance(volume2, Volume):
        raise TypeError()
    lowercorner1, uppercorner1 = volume1.get_corners()
    lowercorner2, uppercorner2 = volume2.get_corners()
    lowercorner = tuple(min(a, b) for a, b in zip(lowercorner1, lowercorner2))
    uppercorner = tuple(max(a, b) for a, b in zip(uppercorner1, uppercorner2))
    return Volume('0_merged', lowercorner, uppercorner)
def included_in(volume, outfile):
    """ Alias of hypercubes_overlap.
    We do not verify that it is included but by definition
    of the problem if volume crosses outfile then volume in outfile.

    Arguments:
    ----------
        volume: Volume in buffer
        outfile: Volume representing an output file

    Raises:
    -------
        TypeError: if either argument is not a Volume.
    """
    if not isinstance(volume, Volume) or \
       not isinstance(outfile, Volume):
        raise TypeError()
    vol_low, vol_up = volume.get_corners()  # bottom-left / upper-right corners
    out_low, out_up = outfile.get_corners()
    # The volume is included iff, on every axis, both of its corner coordinates
    # fall inside the output file's interval on that axis.
    for dim in range(len(out_low)):
        low_inside = out_low[dim] <= vol_low[dim] <= out_up[dim]
        up_inside = out_low[dim] <= vol_up[dim] <= out_up[dim]
        if not (low_inside and up_inside):
            return False
    return True
def add_to_array_dict(array_dict, outfile, volume):
    """Record that `volume` must be written into the output file `outfile`.

    Arguments:
    ----------
        array_dict: dict mapping an output-file index to the list of Volumes
            to write into that file (mutated in place)
        outfile: output-file Volume; its .index must be an int
        volume: Volume from the buffer

    Raises:
    -------
        TypeError: if the arguments do not have the expected types.
    """
    if (not isinstance(outfile.index, int)
            or not isinstance(volume, Volume)
            or not isinstance(outfile, Volume)):
        raise TypeError()
    # setdefault replaces the original explicit "key missing" branch.
    array_dict.setdefault(outfile.index, list()).append(volume)
def clean_arrays_dict(arrays_dict):
    """ From a dictionary of Volumes, creates a dictionary of list of slices.
    The new arrays_dict associates each output file to each volume that must be written at a time.
    """
    # In-place conversion: every Volume value is replaced by its slice
    # representation (convert_Volume_to_slices is defined elsewhere in this module).
    for k in arrays_dict.keys():
        volumes_list = arrays_dict[k]
        arrays_dict[k] = [convert_Volume_to_slices(v) for v in volumes_list]
def get_overlap_subarray(hypercube1, hypercube2):
    """ Find the intersection of both files.
    Refactor of hypercubes_overlap to return the overlap subarray

    Returns:
    --------
        pair (lower corner, upper corner) of the overlap subarray, as lists

    Raises:
    -------
        TypeError: if either argument is not a Volume.

    See also:
    ---------
        utils.hypercubes_overlap
    """
    if not (isinstance(hypercube1, Volume) and isinstance(hypercube2, Volume)):
        raise TypeError()
    low1, up1 = hypercube1.get_corners()
    low2, up2 = hypercube2.get_corners()
    nb_dims = len(up1)
    # Intersection per axis: highest of the low bounds, lowest of the high bounds.
    overlap_low = [max(low1[d], low2[d]) for d in range(nb_dims)]
    overlap_up = [min(up1[d], up2[d]) for d in range(nb_dims)]
    return (overlap_low, overlap_up)
def get_named_volumes(blocks_partition, block_shape):
    """Map every block's numeric (storage-order) index to its Volume.

    The first block is placed at the origin; block (i, j, k) spans
    [i*s0, (i+1)*s0) x [j*s1, (j+1)*s1) x [k*s2, (k+1)*s2) where
    (s0, s1, s2) = block_shape.

    Arguments:
    ----------
        blocks_partition: number of blocks along each dimension
        block_shape: shape shared by every block

    Returns:
    --------
        dict mapping each numeric block index to a Volume with its coordinates
    """
    volumes = dict()
    s0, s1, s2 = block_shape
    for i in range(blocks_partition[0]):
        for j in range(blocks_partition[1]):
            for k in range(blocks_partition[2]):
                lower = (s0 * i, s1 * j, s2 * k)
                upper = (s0 * (i + 1), s1 * (j + 1), s2 * (k + 1))
                numeric_index = _3d_to_numeric_pos((i, j, k), blocks_partition, order='C')
                volumes[numeric_index] = Volume(numeric_index, lower, upper)
    return volumes
def apply_merge(volume, volumes, merge_directions):
    """ Merge volume with other volumes from volumes list in the merge directions.

    Recursive structure: the 2- and 3-direction cases extend the volume along
    the first axis, recurse on the remaining axes for both the original and
    the extended volume, and merge the two results.

    Arguments:
    ----------
        volume: volume to merge
        volumes: list of volumes
        merge_directions: indicates neighbours to merge with (Axes members)
    """
    # NOTE(review): `volumes` is only forwarded to the recursive calls and is
    # never read directly here — TODO confirm the parameter is still needed.
    import copy
    logger.debug("\t== Function == apply_merge")
    p1, p2 = volume.get_corners()
    logger.debug("\tTargetting volume with low corner %s", p1)
    if len(merge_directions) == 1:
        # Single axis: shift the low corner to p2 on that axis and build the
        # resulting volume (get_new_volume is defined elsewhere in this module).
        # NOTE(review): if merge_directions holds one element that is none of
        # Axes.i/j/k, `new_volume` is never bound and the final return raises
        # NameError — confirm callers only pass Axes members.
        if Axes.k in merge_directions:
            p1_target = list(copy.deepcopy(p1))
            p1_target[Axes.k.value] = p2[Axes.k.value]
            new_volume = get_new_volume(volume, tuple(p1_target))
        elif Axes.j in merge_directions:
            p1_target = list(copy.deepcopy(p1))
            p1_target[Axes.j.value] = p2[Axes.j.value]
            new_volume = get_new_volume(volume, tuple(p1_target))
        elif Axes.i in merge_directions:
            p1_target = list(copy.deepcopy(p1))
            p1_target[Axes.i.value] = p2[Axes.i.value]
            new_volume = get_new_volume(volume, tuple(p1_target))
    elif len(merge_directions) == 2:
        logger.debug("\tMerge directions: %s", merge_directions)
        axis1, axis2 = merge_directions
        p1_target = list(copy.deepcopy(p1))
        p1_target[axis1.value] = p2[axis1.value]
        volume_axis1 = get_new_volume(volume, tuple(p1_target))
        # Recurse on the second axis for both the shifted and original volumes,
        # then merge the two partial results.
        new_volume_axis1 = apply_merge(volume_axis1, volumes, [axis2])
        new_volume_axis2 = apply_merge(volume, volumes, [axis2])
        new_volume = merge_volumes(new_volume_axis1, new_volume_axis2)
    elif len(merge_directions) == 3:
        logger.debug("\tMerge directions %s", merge_directions)
        axis1, axis2, axis3 = merge_directions
        p1_target = list(copy.deepcopy(p1))
        p1_target[axis1.value] = p2[axis1.value]
        volume_axis1 = get_new_volume(volume, tuple(p1_target))
        new_vol1 = apply_merge(volume, volumes, [axis2, axis3])
        new_vol2 = apply_merge(volume_axis1, volumes, [axis2, axis3])
        new_volume = merge_volumes(new_vol1, new_vol2)
    else:
        raise ValueError()
    logger.debug("\tEnd")
    return new_volume
def numeric_to_3d_pos(numeric_pos, blocks_partition, order):
    """ Convert numeric block position into its 3d position in the array in a given storage order.

    Arguments:
    ----------
    numeric_pos: non-negative flat block index
    blocks_partition: number of blocks along each of the 3 dimensions
    order: storage order; only 'C' (row-major) is supported

    Returns:
    --------
    (i, j, k) tuple of block coordinates

    Raises:
    -------
    ValueError: if order is not 'C'

    See also:
    --------
    get_partition
    """
    if order != 'C':
        raise ValueError("unsupported")
    nb_blocks_per_row = blocks_partition[2]
    nb_blocks_per_slice = blocks_partition[1] * blocks_partition[2]
    # divmod keeps the arithmetic in exact integers; the previous
    # math.floor(a / b) went through float division, which can round
    # incorrectly for very large indices.
    i, remainder = divmod(numeric_pos, nb_blocks_per_slice)
    j, k = divmod(remainder, nb_blocks_per_row)
    return (i, j, k)
def _3d_to_numeric_pos(_3d_pos, blocks_partition, order):
""" Convert 3d block position into its numeric position in a given storage order.
See also:
--------
get_partition
"""
if order == 'C':
nb_blocks_per_row = blocks_partition[2]
nb_blocks_per_slice = blocks_partition[1] * blocks_partition[2]
else:
raise ValueError("unsupported")
return (_3d_pos[0] * nb_blocks_per_slice) + \
(_3d_pos[1] * nb_blocks_per_row) + _3d_pos[2]
def get_partition(array_shape, chunk_shape):
    """ Returns partition of array by chunks.

    Arguments:
    ----------
    array_shape: shape of input array
    chunk_shape: shape of one chunk

    Returns:
    --------
    the partition as a tuple (number of whole chunks per dimension)

    Raises:
    -------
    ValueError: if the two shapes have different numbers of dimensions
    """
    chunks = chunk_shape
    if len(array_shape) != len(chunks):
        raise ValueError(
            "chunks and shape should have the same dimension",
            array_shape,
            chunks)
    # Floor division is exact for arbitrarily large extents, whereas the
    # previous int(s / c) went through float division and could round wrongly.
    return tuple(s // c for s, c in zip(array_shape, chunks))
def to_basis(v, basis):
    """ Create a new volume from volume v with basis changed from R to basis

    Arguments:
    ----------
    v: Volume obj
    basis: Volume obj
    """
    shifted = Volume(0, v.p1, v.p2)
    # Shifting by -basis.p1 expresses the volume relative to the basis origin.
    shifted.add_offset(tuple(-coord for coord in basis.p1))
    # sanity check: every coordinate must remain non-negative after the shift
    low, high = shifted.get_corners()
    for corner in (low, high):
        for coord in corner:
            if coord < 0:
                print("Volume in basis R:")
                v.print()
                print("Basis:")
                basis.print()
                raise ValueError("An error occured while changing from basis R to new basis")
    return shifted
from enum import Enum
from repartition_experiments.file_formats.hdf5 import HDF5_manager
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class Axes(Enum):
    """Names for the three array axes; values index corner tuples (e.g. p1[Axes.j.value])."""
    i = 0
    j = 1
    k = 2
class Volume:
    """Axis-aligned 3d box identified by an index and two opposite corners."""

    def __init__(self, index, p1, p2):
        """index: any identifier; p1/p2: bottom-left / top-right corner tuples."""
        if not isinstance(p1, tuple) or not isinstance(p2, tuple):
            raise TypeError()
        self.index = index
        self.p1 = p1  # bottom left corner
        self.p2 = p2  # top right corner

    def get_shape(self):
        """Extent of the box along each of the three axes."""
        return (self.p2[0] - self.p1[0],
                self.p2[1] - self.p1[1],
                self.p2[2] - self.p1[2])

    def get_slices(self):
        """(start, stop) pair per axis."""
        return ((self.p1[0], self.p2[0]),
                (self.p1[1], self.p2[1]),
                (self.p1[2], self.p2[2]))

    def add_offset(self, offset):
        """Translate both corners by offset (a tuple or list)."""
        self.p1 = self._add_offset(self.p1, offset)
        self.p2 = self._add_offset(self.p2, offset)

    def _add_offset(self, point, offset):
        """Element-wise sum of a corner and an offset; offset must be tuple/list."""
        if isinstance(offset, list):
            offset = tuple(offset)
        elif not isinstance(offset, tuple):
            raise TypeError("Expected tuple")
        return tuple(map(operator.add, point, offset))

    def get_corners(self):
        """Return (bottom-left, top-right) corner tuples."""
        return (self.p1, self.p2)

    def equals(self, volume):
        """True iff index and both corners match."""
        return (self.index == volume.index
                and self.p1 == volume.p1
                and self.p2 == volume.p2)

    def print(self):
        """Debug dump of index, corner ranges and shape."""
        print(f"Volume name: {self.index}, ({self.p1[0]}:{self.p2[0]},{self.p1[1]}:{self.p2[1]},{self.p1[2]}:{self.p2[2]}), shape:({self.p2[0]-self.p1[0]},{self.p2[1]-self.p1[1]},{self.p2[2]-self.p1[2]})")
def get_theta(buffers_volumes, buffer_index, _3d_index, O, B):
    """Per-dimension split of a buffer of shape B against output files of shape O.

    For each dimension, Cs holds the computed remainder C and T holds
    B[dim] - C, so that T[dim] + Cs[dim] == B[dim].
    """
    T, Cs = [], []
    for dim in range(len(buffers_volumes[buffer_index].p1)):
        b, o = B[dim], O[dim]
        if b < o:
            overflow = 0
        else:
            overflow = ((_3d_index[dim] + 1) * b) % o
            # particular case: an exact multiple counts as a full length o
            if overflow == 0 and b != o:
                overflow = o
        if overflow < 0:
            raise ValueError("modulo should not return negative value")
        Cs.append(overflow)
        T.append(b - overflow)
    return T, Cs
def get_opened_files():
    """Print how many file handles the current process holds (debug aid, uses psutil)."""
    proc = psutil.Process()
    print(f"Number of opened files: {len(proc.open_files())}")
def get_volumes(R, B):
    """ Returns a dictionary mapping each buffer (numeric) index to a Volume object containing its coordinates in R.

    Arguments:
    ----------
    R: original array
    B: buffer shape
    """
    buffers_partition = get_partition(R, B)
    named_volumes = get_named_volumes(buffers_partition, B)
    return buffers_partition, named_volumes
def hypercubes_overlap(hypercube1, hypercube2):
    """ Evaluate if two hypercubes cross each other.
    """
    if not isinstance(hypercube1, Volume) or not isinstance(hypercube2, Volume):
        raise TypeError()
    low1, up1 = hypercube1.get_corners()
    low2, up2 = hypercube2.get_corners()
    # The boxes intersect iff, along every axis, each starts strictly before
    # the other ends (upper bounds are exclusive).
    return all(up1[d] > low2[d] and up2[d] > low1[d] for d in range(len(up1)))
def get_blocks_shape(big_array, small_array):
    """ Return the number of small arrays in big array in all dimensions as a shape.

    Arguments:
    ----------
    big_array: shape of the enclosing array
    small_array: shape of one block

    Returns:
    --------
    tuple with, per dimension, how many blocks fit in the array
    """
    # Floor division is exact for arbitrarily large ints, whereas the
    # previous int(b / s) went through float division and could round wrongly.
    return tuple(b // s for b, s in zip(big_array, small_array))
def get_crossed_outfiles(buffer_of_interest, outfiles_volumes):
    """ Returns list of output files that are crossing buffer at buffer_index.

    Arguments:
    ----------
    outfiles_volumes: dict of volumes representing the output files, indexed in storage order.
    """
    # Keep every outfile Volume that overlaps the buffer.
    return [outfile
            for outfile in outfiles_volumes.values()
            if hypercubes_overlap(buffer_of_interest, outfile)]
def merge_volumes(volume1, volume2):
    """ Merge two volumes into one (their common bounding box).
    """
    if not isinstance(volume1, Volume) or not isinstance(volume2, Volume):
        raise TypeError()
    low1, up1 = volume1.get_corners()
    low2, up2 = volume2.get_corners()
    # Component-wise min of the lower corners and max of the upper corners.
    lowercorner = tuple(min(a, b) for a, b in zip(low1, low2))
    uppercorner = tuple(max(a, b) for a, b in zip(up1, up2))
    return Volume('0_merged', lowercorner, uppercorner)
def included_in(volume, outfile):
    """ Alias of hypercubes_overlap.
    We do not verify that it is included but by definition
    of the problem if volume crosses outfile then volume in outfile.

    Arguments:
    ----------
    volume: Volume in buffer
    outfile: Volume representing an output file
    """
    if not isinstance(volume, Volume) or not isinstance(outfile, Volume):
        raise TypeError()
    vol_low, vol_up = volume.get_corners()
    out_low, out_up = outfile.get_corners()
    # Included iff, along every axis, both of the volume's corners fall
    # within the outfile's extent (bounds inclusive).
    return all(
        out_low[dim] <= vol_low[dim] <= out_up[dim]
        and out_low[dim] <= vol_up[dim] <= out_up[dim]
        for dim in range(len(out_low))
    )
def add_to_array_dict(array_dict, outfile, volume):
    """ Add volume information to dictionary associating output file index to volumes.

    Arguments:
    ----------
    outfile: outfile volume
    volume: volume from buffer
    """
    if (not isinstance(outfile.index, int)
            or not isinstance(volume, Volume)
            or not isinstance(outfile, Volume)):
        raise TypeError()
    # Create the list lazily on first use, then append.
    array_dict.setdefault(outfile.index, []).append(volume)
def convert_Volume_to_slices(v):
    """Turn a Volume's corner pair into a tuple of slice objects, one per axis."""
    if not isinstance(v, Volume):
        raise TypeError()
    low, up = v.get_corners()
    return tuple(slice(start, stop, None) for start, stop in zip(low, up))
def clean_arrays_dict(arrays_dict):
    """ From a dictionary of Volumes, creates a dictionary of list of slices.
    The new arrays_dict associates each output file to each volume that must be written at a time.
    """
    # In-place replacement: keys stay the same, values become slice tuples.
    for key, volumes_list in arrays_dict.items():
        arrays_dict[key] = [convert_Volume_to_slices(v) for v in volumes_list]
def get_overlap_subarray(hypercube1, hypercube2):
    """ Find the intersection of both files.
    Refactor of hypercubes_overlap to return the overlap subarray

    Returns:
    --------
    pair of corners of the subarray

    See also:
    ---------
    utils.hypercubes_overlap
    """
    if not isinstance(hypercube1, Volume) or not isinstance(hypercube2, Volume):
        raise TypeError()
    low1, up1 = hypercube1.get_corners()
    low2, up2 = hypercube2.get_corners()
    # The intersection box is bounded below by the larger lower corner and
    # above by the smaller upper corner, per axis.
    subarray_lowercorner = [max(a, b) for a, b in zip(low1, low2)]
    subarray_uppercorner = [min(a, b) for a, b in zip(up1, up2)]
    return (subarray_lowercorner, subarray_uppercorner)
def get_named_volumes(blocks_partition, block_shape):
    """ Return the coordinates of all entities of shape block shape in the reconstructed image.
    The first entity is placed at the origin of the base.

    Returns:
    ---------
    d: dictionary mapping each buffer numeric index to a Volume representing its coordinates

    Arguments:
    ----------
    blocks_partition: Number of blocks in each dimension. Shape of the reconstructed image in terms of the blocks considered.
    block_shape: shape of one block, all blocks having the same shape
    """
    volumes = dict()
    # Walk every block position (i, j, k); corners follow directly from the
    # position scaled by the block extent, keyed by the flat 'C'-order index.
    for i in range(blocks_partition[0]):
        for j in range(blocks_partition[1]):
            for k in range(blocks_partition[2]):
                low = (i * block_shape[0],
                       j * block_shape[1],
                       k * block_shape[2])
                up = ((i + 1) * block_shape[0],
                      (j + 1) * block_shape[1],
                      (k + 1) * block_shape[2])
                index = _3d_to_numeric_pos((i, j, k), blocks_partition, order='C')
                volumes[index] = Volume(index, low, up)
    return volumes
def apply_merge(volume, volumes, merge_directions):
    """ Merge volume with other volumes from volumes list in the merge directions.

    Recursively merges `volume` with its neighbours along every axis listed
    in `merge_directions`; matched neighbours are popped from `volumes`.

    Arguments:
    ----------
    volume: volume to merge
    volumes: list of volumes
    merge_directions: indicates neighbours to merge with (list of Axes members)

    Returns:
    --------
    the merged Volume (its index carries a '_merged' suffix)
    """
    def get_new_volume(volume, lowcorner):
        # Merge with the neighbour whose low corner is `lowcorner` if one
        # exists; otherwise just tag this volume's index as merged.
        v2 = get_volume(lowcorner)
        if v2 != None:
            return merge_volumes(volume, v2)
        else:
            _id = volume.index.split('_')[0]
            volume.index = str(_id) + '_merged'
            return volume
    def get_volume(lowcorner):
        # Pop and return the volume in `volumes` whose low corner equals
        # `lowcorner`, or None when there is no such volume.
        if not isinstance(lowcorner, tuple):
            raise TypeError() # required for "=="
        for i in range(len(volumes)):
            v = volumes[i]
            if v.p1 == lowcorner:
                logger.debug("\tMerging volume with low corner %s", v.p1)
                return volumes.pop(i)
        logger.warning("\tNo volume to merge with")
        return None
    import copy
    logger.debug("\t== Function == apply_merge")
    p1, p2 = volume.get_corners()
    logger.debug("\tTargetting volume with low corner %s", p1)
    if len(merge_directions) == 1:
        # Base case: merge with the single neighbour along one axis. The
        # neighbour's low corner is this volume's low corner pushed to p2
        # along that axis.
        if Axes.k in merge_directions:
            p1_target = list(copy.deepcopy(p1))
            p1_target[Axes.k.value] = p2[Axes.k.value]
            new_volume = get_new_volume(volume, tuple(p1_target))
        elif Axes.j in merge_directions:
            p1_target = list(copy.deepcopy(p1))
            p1_target[Axes.j.value] = p2[Axes.j.value]
            new_volume = get_new_volume(volume, tuple(p1_target))
        elif Axes.i in merge_directions:
            p1_target = list(copy.deepcopy(p1))
            p1_target[Axes.i.value] = p2[Axes.i.value]
            new_volume = get_new_volume(volume, tuple(p1_target))
    elif len(merge_directions) == 2:
        # Merge along axis1 first, then recurse along axis2 for both halves
        # and combine the two results.
        logger.debug("\tMerge directions: %s", merge_directions)
        axis1, axis2 = merge_directions
        p1_target = list(copy.deepcopy(p1))
        p1_target[axis1.value] = p2[axis1.value]
        volume_axis1 = get_new_volume(volume, tuple(p1_target))
        new_volume_axis1 = apply_merge(volume_axis1, volumes, [axis2])
        new_volume_axis2 = apply_merge(volume, volumes, [axis2])
        new_volume = merge_volumes(new_volume_axis1, new_volume_axis2)
    elif len(merge_directions) == 3:
        # 3d case: split off the neighbour along axis1, then recurse on the
        # remaining two axes for both parts.
        logger.debug("\tMerge directions %s", merge_directions)
        axis1, axis2, axis3 = merge_directions
        p1_target = list(copy.deepcopy(p1))
        p1_target[axis1.value] = p2[axis1.value]
        volume_axis1 = get_new_volume(volume, tuple(p1_target))
        new_vol1 = apply_merge(volume, volumes, [axis2, axis3])
        new_vol2 = apply_merge(volume_axis1, volumes, [axis2, axis3])
        new_volume = merge_volumes(new_vol1, new_vol2)
    else:
        raise ValueError()
    logger.debug("\tEnd")
    return new_volume
def numeric_to_3d_pos(numeric_pos, blocks_partition, order):
    """ Convert numeric block position into its 3d position in the array in a given storage order.

    Arguments:
    ----------
    numeric_pos: non-negative flat block index
    blocks_partition: number of blocks along each of the 3 dimensions
    order: storage order; only 'C' (row-major) is supported

    Returns:
    --------
    (i, j, k) tuple of block coordinates

    Raises:
    -------
    ValueError: if order is not 'C'

    See also:
    --------
    get_partition
    """
    if order != 'C':
        raise ValueError("unsupported")
    nb_blocks_per_row = blocks_partition[2]
    nb_blocks_per_slice = blocks_partition[1] * blocks_partition[2]
    # divmod keeps the arithmetic in exact integers; the previous
    # math.floor(a / b) went through float division, which can round
    # incorrectly for very large indices.
    i, remainder = divmod(numeric_pos, nb_blocks_per_slice)
    j, k = divmod(remainder, nb_blocks_per_row)
    return (i, j, k)
def _3d_to_numeric_pos(_3d_pos, blocks_partition, order):
""" Convert 3d block position into its numeric position in a given storage order.
See also:
--------
get_partition
"""
if order == 'C':
nb_blocks_per_row = blocks_partition[2]
nb_blocks_per_slice = blocks_partition[1] * blocks_partition[2]
else:
raise ValueError("unsupported")
return (_3d_pos[0] * nb_blocks_per_slice) + \
(_3d_pos[1] * nb_blocks_per_row) + _3d_pos[2]
def get_partition(array_shape, chunk_shape):
    """ Returns partition of array by chunks.

    Arguments:
    ----------
    array_shape: shape of input array
    chunk_shape: shape of one chunk

    Returns:
    --------
    the partition as a tuple (number of whole chunks per dimension)

    Raises:
    -------
    ValueError: if the two shapes have different numbers of dimensions
    """
    chunks = chunk_shape
    if len(array_shape) != len(chunks):
        raise ValueError(
            "chunks and shape should have the same dimension",
            array_shape,
            chunks)
    # Floor division is exact for arbitrarily large extents, whereas the
    # previous int(s / c) went through float division and could round wrongly.
    return tuple(s // c for s, c in zip(array_shape, chunks))
def get_file_manager(file_format):
    """Return the file manager for the given format string; only "HDF5" is supported."""
    if file_format != "HDF5":
        print("File format not supported yet. Aborting...")
        raise ValueError()
    return HDF5_manager()
def to_basis(v, basis):
    """ Create a new volume from volume v with basis changed from R to basis

    Arguments:
    ----------
    v: Volume obj
    basis: Volume obj
    """
    shifted = Volume(0, v.p1, v.p2)
    # Shifting by -basis.p1 expresses the volume relative to the basis origin.
    shifted.add_offset(tuple(-coord for coord in basis.p1))
    # sanity check: every coordinate must remain non-negative after the shift
    low, high = shifted.get_corners()
    for corner in (low, high):
        for coord in corner:
            if coord < 0:
                print("Volume in basis R:")
                v.print()
                print("Basis:")
                basis.print()
                raise ValueError("An error occured while changing from basis R to new basis")
    return shifted
7039fa9efaf3eed38d0a11013660a04b20f4c052 | 1,468 | py | Python | PythonLeetcode/周赛/223/summary.py | Lcoderfit/Introduction-to-algotithms | aea2630be6ca2c60186593d6e66b0a59e56dc848 | [
"MIT"
] | 3 | 2018-08-25T16:14:16.000Z | 2019-10-15T22:25:32.000Z | PythonLeetcode/周赛/223/summary.py | Lcoderfit/Introduction-to-algotithms | aea2630be6ca2c60186593d6e66b0a59e56dc848 | [
"MIT"
] | null | null | null | PythonLeetcode/周赛/223/summary.py | Lcoderfit/Introduction-to-algotithms | aea2630be6ca2c60186593d6e66b0a59e56dc848 | [
"MIT"
] | 1 | 2019-10-08T09:03:48.000Z | 2019-10-08T09:03:48.000Z | """
一、1167. 连接棒材的最低费用.py
遇到这种需要排序,然后取最小的两个或者最大的两个数进行操作得到一个结果,这个结果又要与剩下的元素进行同样操作的时候,可以采用堆的数据结构简化
二、1564. 把箱子放进仓库里 I.py
1.当有双指针时,其中一个指针必须遍历完所有元素,则可将while替换为for循环
2.这个跟分发饼干有些类似,关键在于将高低不同的warehouse转换为非递增的序列
例如 3 5 4 2 3, 后一块墙能通过多大的板子受到前一块板子的限制,也就是能通过当前墙面的最大板子为min(前一块墙高度,当前墙高度)
所以可以通过从左到右遍历,两个相邻的墙对比,如果后一块墙要高于前一快,则将后一块改成跟前一块一样高就行,
再对boxes进行排序,这样warehouse和boxes都是有序的,两个有序序列的分发问题,就是分发饼干了,用双指针即可。
三、870. 优势洗牌.py
方法1: 排序+贪心 (单指针插入法): 用一个索引变量来控制元素插入位置,插入一个则变量自增1,如果遇到插入位置已经有元素,
则再向右移动直到找到右边第一个未插入元素的位置
方法2: 排序+贪心 (双表拆合法): 将两种不同性质的元素分拆到两个列表里,然后再根据条件选择其中的一个元素放入结果列表中合并
四、342. 4的幂.py
取模运算定律
五、389. 找不同.py
相同字符进行异或运算则抵消为0,所以如果s比t少了一个字符,则直接将两个字符串的所有字符进行异或运算即可
六、405. 数字转换为十六进制数.py
hex_str = "0123456789abcdef"; 可以使用这个对十进制数转换为十六进制数进行映射简化操作
七、面试题 17.10. 主要元素.py
摩尔投票法:
1.判断票数是否为0,如果为0则取当前元素为结果
2.判断当前结果是否与当前元素相等,相等则将票数+1,否则-1
八、面试题 05.06. 整数转换.py
Python3占用字节数
九、751. IP 到 CIDR.py
start & -start算出来的是start的二进制表示中,最右边的一个“1”及该“1”右边的所有0
用于构建子网的位必须都为0,否则不能用于构建本题中的ip
能够用于构建子网的位不能比n的二进制表示的长度要大,n二进制表示为100,则用于构建子网的位必须 < 3
而由于start & (-start)除了包含用于构建子网的“0”,还包含start二进制表示中最右边的一个1,
故(start & -start).bit_length() <= n.bit_length(), 所以在符合要求情况下,最长的用于构建子网的位为:
min((start & -start).bit_length(), n.bit_length()) - 1
mask = 32 - (min((start & -start).bit_length(), n.bit_length()) - 1) =》
33 - min((start & -start).bit_length(), n.bit_length()
十、1356. 根据数字二进制下 1 的数目排序.py
bin(x).count("1")
""" | 31.234043 | 86 | 0.740463 | """
一、1167. 连接棒材的最低费用.py
遇到这种需要排序,然后取最小的两个或者最大的两个数进行操作得到一个结果,这个结果又要与剩下的元素进行同样操作的时候,可以采用堆的数据结构简化
二、1564. 把箱子放进仓库里 I.py
1.当有双指针时,其中一个指针必须遍历完所有元素,则可将while替换为for循环
2.这个跟分发饼干有些类似,关键在于将高低不同的warehouse转换为非递增的序列
例如 3 5 4 2 3, 后一块墙能通过多大的板子受到前一块板子的限制,也就是能通过当前墙面的最大板子为min(前一块墙高度,当前墙高度)
所以可以通过从左到右遍历,两个相邻的墙对比,如果后一块墙要高于前一快,则将后一块改成跟前一块一样高就行,
再对boxes进行排序,这样warehouse和boxes都是有序的,两个有序序列的分发问题,就是分发饼干了,用双指针即可。
三、870. 优势洗牌.py
方法1: 排序+贪心 (单指针插入法): 用一个索引变量来控制元素插入位置,插入一个则变量自增1,如果遇到插入位置已经有元素,
则再向右移动直到找到右边第一个未插入元素的位置
方法2: 排序+贪心 (双表拆合法): 将两种不同性质的元素分拆到两个列表里,然后再根据条件选择其中的一个元素放入结果列表中合并
四、342. 4的幂.py
取模运算定律
五、389. 找不同.py
相同字符进行异或运算则抵消为0,所以如果s比t少了一个字符,则直接将两个字符串的所有字符进行异或运算即可
六、405. 数字转换为十六进制数.py
hex_str = "0123456789abcdef"; 可以使用这个对十进制数转换为十六进制数进行映射简化操作
七、面试题 17.10. 主要元素.py
摩尔投票法:
1.判断票数是否为0,如果为0则取当前元素为结果
2.判断当前结果是否与当前元素相等,相等则将票数+1,否则-1
八、面试题 05.06. 整数转换.py
Python3占用字节数
九、751. IP 到 CIDR.py
start & -start算出来的是start的二进制表示中,最右边的一个“1”及该“1”右边的所有0
用于构建子网的位必须都为0,否则不能用于构建本题中的ip
能够用于构建子网的位不能比n的二进制表示的长度要大,n二进制表示为100,则用于构建子网的位必须 < 3
而由于start & (-start)除了包含用于构建子网的“0”,还包含start二进制表示中最右边的一个1,
故(start & -start).bit_length() <= n.bit_length(), 所以在符合要求情况下,最长的用于构建子网的位为:
min((start & -start).bit_length(), n.bit_length()) - 1
mask = 32 - (min((start & -start).bit_length(), n.bit_length()) - 1) =》
33 - min((start & -start).bit_length(), n.bit_length()
十、1356. 根据数字二进制下 1 的数目排序.py
bin(x).count("1")
""" | 0 | 0 | 0 |
288bb81d358fa77f0218517e7452e76bd9daf534 | 34 | py | Python | oasislmf/cli/__init__.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | [
"BSD-3-Clause"
] | 88 | 2018-03-24T11:57:10.000Z | 2022-03-21T13:04:41.000Z | oasislmf/cli/__init__.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | [
"BSD-3-Clause"
] | 558 | 2018-03-14T14:16:30.000Z | 2022-03-29T12:48:14.000Z | oasislmf/cli/__init__.py | ibailey-SCOR/OasisLMF | 966b4de4e1e64851970f4291c5bdfe7edc20cb7a | [
"BSD-3-Clause"
] | 41 | 2018-04-09T11:13:12.000Z | 2021-10-05T14:43:11.000Z | from .root import RootCmd # noqa
| 17 | 33 | 0.735294 | from .root import RootCmd # noqa
| 0 | 0 | 0 |
c18b39d5ec83f88bf9a4528ab2fc41ae5f48f7d3 | 1,685 | py | Python | data/sex-difference.py | franpog859/darwinLogs | 1d0a9bdd7c928ceee96b3121e408fc1ee1d80e05 | [
"Apache-2.0"
] | 7 | 2019-02-21T10:50:09.000Z | 2019-10-16T06:22:27.000Z | data/sex-difference.py | franpog859/darwinLogs | 1d0a9bdd7c928ceee96b3121e408fc1ee1d80e05 | [
"Apache-2.0"
] | null | null | null | data/sex-difference.py | franpog859/darwinLogs | 1d0a9bdd7c928ceee96b3121e408fc1ee1d80e05 | [
"Apache-2.0"
] | 2 | 2019-10-16T17:34:25.000Z | 2020-01-18T20:40:02.000Z | import random
import matplotlib.pyplot as plt
import pandas as pd
from math import fabs
import numpy as np
sex_differences = []
pair_differences = []
for i in range(2, 100):
for j in range(i):
men = j
women = i - j
homo_pair = 0
hetero_pair = 0
for m in range(men):
men = men - 1
if random.randint(1,i) <= j:
men = men - 1
homo_pair = homo_pair + 1
else:
women = women - 1
hetero_pair = hetero_pair + 1
homo_pair = homo_pair + women // 2
sex_difference = fabs(2*j - i) / i
sex_differences.append(sex_difference)
pair_difference = homo_pair / (homo_pair + hetero_pair)
pair_differences.append(pair_difference)
plt.scatter(x=pair_differences, y=sex_differences, color='blue', label='generated characteristic')
darwin_output = pd.read_csv('output/logs.csv', sep=';')
darwin_sex_differences = np.fabs((np.array(darwin_output.adult_males_number.tolist()) - np.array(darwin_output.adult_females_number.tolist())) / (np.array(darwin_output.adult_males_number.tolist()) + np.array(darwin_output.adult_females_number.tolist())))
darwin_pair_differences = np.array(darwin_output.homo_couples_number.tolist()) / (np.array(darwin_output.homo_couples_number.tolist()) + np.array(darwin_output.straight_couples_number.tolist()))
plt.scatter(x=darwin_pair_differences, y=darwin_sex_differences, color='red', label='actual results')
plt.title('Sex difference')
plt.xlabel('How many more homosexual couples there are')
plt.ylabel('Difference in number of adults of a given sex')
plt.legend(loc=2)
plt.show()
| 35.851064 | 255 | 0.681306 | import random
import matplotlib.pyplot as plt
import pandas as pd
from math import fabs
import numpy as np
sex_differences = []
pair_differences = []
for i in range(2, 100):
for j in range(i):
men = j
women = i - j
homo_pair = 0
hetero_pair = 0
for m in range(men):
men = men - 1
if random.randint(1,i) <= j:
men = men - 1
homo_pair = homo_pair + 1
else:
women = women - 1
hetero_pair = hetero_pair + 1
homo_pair = homo_pair + women // 2
sex_difference = fabs(2*j - i) / i
sex_differences.append(sex_difference)
pair_difference = homo_pair / (homo_pair + hetero_pair)
pair_differences.append(pair_difference)
plt.scatter(x=pair_differences, y=sex_differences, color='blue', label='generated characteristic')
darwin_output = pd.read_csv('output/logs.csv', sep=';')
darwin_sex_differences = np.fabs((np.array(darwin_output.adult_males_number.tolist()) - np.array(darwin_output.adult_females_number.tolist())) / (np.array(darwin_output.adult_males_number.tolist()) + np.array(darwin_output.adult_females_number.tolist())))
darwin_pair_differences = np.array(darwin_output.homo_couples_number.tolist()) / (np.array(darwin_output.homo_couples_number.tolist()) + np.array(darwin_output.straight_couples_number.tolist()))
plt.scatter(x=darwin_pair_differences, y=darwin_sex_differences, color='red', label='actual results')
plt.title('Sex difference')
plt.xlabel('How many more homosexual couples there are')
plt.ylabel('Difference in number of adults of a given sex')
plt.legend(loc=2)
plt.show()
| 0 | 0 | 0 |
514cd2c6cf41ca4554af2219f11300bbd63be9fd | 378 | py | Python | project_summarizer/__main__.py | jackdewinter/pyscan | 05ea9bff0aaf4d53aa401c51526bb847accec56a | [
"MIT"
] | 1 | 2021-01-14T17:39:18.000Z | 2021-01-14T17:39:18.000Z | project_summarizer/__main__.py | jackdewinter/pyscan | 05ea9bff0aaf4d53aa401c51526bb847accec56a | [
"MIT"
] | 17 | 2020-08-15T23:27:28.000Z | 2022-02-20T18:23:49.000Z | project_summarizer/__main__.py | jackdewinter/pyscan | 05ea9bff0aaf4d53aa401c51526bb847accec56a | [
"MIT"
] | null | null | null | """
Module to provide for "-m project_summarizer" access to the module,
as if it was run from the console.
"""
import project_summarizer
def main():
    """
    Console entry point for "-m project_summarizer".

    Kept as a named function so the setup entry_points configuration
    has something to execute.
    """
    summarizer = project_summarizer.ProjectSummarizer()
    summarizer.main()
if __name__ == "__main__":
main()
| 21 | 67 | 0.701058 | """
Module to provide for "-m project_summarizer" access to the module,
as if it was run from the console.
"""
import project_summarizer
def main():
    """
    Console entry point for "-m project_summarizer".

    Kept as a named function so the setup entry_points configuration
    has something to execute.
    """
    summarizer = project_summarizer.ProjectSummarizer()
    summarizer.main()
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
ca88939b873d30889c8ab1eb22c81b8ae87e1875 | 18,742 | py | Python | notebooks/example_utils.py | neonithinar/hexagdly | dcd15bfb7bdabb4f6280f0598f2cf0b923924a81 | [
"MIT"
] | 67 | 2018-02-10T13:54:16.000Z | 2022-01-31T05:41:40.000Z | notebooks/example_utils.py | neonithinar/hexagdly | dcd15bfb7bdabb4f6280f0598f2cf0b923924a81 | [
"MIT"
] | 4 | 2018-02-21T16:28:38.000Z | 2020-05-02T17:01:01.000Z | notebooks/example_utils.py | neonithinar/hexagdly | dcd15bfb7bdabb4f6280f0598f2cf0b923924a81 | [
"MIT"
] | 17 | 2018-05-25T12:30:19.000Z | 2021-07-19T05:48:47.000Z | """
HexagDLy utilities for illustrative examples.
"""
import numpy as np
import numpy.linalg as LA
from scipy.interpolate import griddata
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim.lr_scheduler as scheduler
import os
import matplotlib.pyplot as plt
import time
class toy_data:
    r"""Object that contains a set of toy images of randomly scattered
    hexagonal shapes of a certain kind.

    Args:
        shape: str, choose from ...
        nx: int, dimension in x
        ny: int, dimension in y
        nchannels: int, number of input channels ('colour' channels)
        nexamples: int, number of images
        px: int, center row for shape
        py: int, center column for shape
    """
    # NOTE(review): no methods are present in this copy of the class — the
    # body appears to have been stripped; only the docstring remains.
###################################################################
class toy_data2:
    r"""Object that contains a set of toy images of randomly scattered
    hexagonal shapes of a certain kind.

    Args:
        shape: str, choose from ...
        nx: int, dimension in x
        ny: int, dimension in y
        nchannels: int, number of input channels ('colour' channels)
        nexamples: int, number of images
        px: int, center row for shape
        py: int, center column for shape
    """
    # NOTE(review): no methods are present in this copy of the class — the
    # body appears to have been stripped; only the docstring remains.
class toy_dataset:
    r"""Object that creates a data set containing different shapes

    Args:
        shapes: list of strings with names of different shapes
        nperclass: int, number of images of each shape
        nx: int, number of columns of pixels
        ny: int, number of rows of pixels
        nchannels: int, number of channels for each image
    """
    # NOTE(review): no methods are present in this copy of the class — the
    # body appears to have been stripped; only the docstring remains.
class model:
    r"""A toy model CNN

    Args:
        train_dataloader: pytorch dataloader with training data
        val_dataloader: pytorch dataloader with validation data
        net: CNN model
        epochs: int, number of epochs to train
    """
    # NOTE(review): no methods are present in this copy of the class — the
    # body appears to have been stripped; only the docstring remains.
| 35.699048 | 112 | 0.505336 | """
HexagDLy utilities for illustrative examples.
"""
import numpy as np
import numpy.linalg as LA
from scipy.interpolate import griddata
import torch
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torch.optim.lr_scheduler as scheduler
import os
import matplotlib.pyplot as plt
import time
def put_shape(nx, ny, cx, cy, params):
d = np.zeros((nx, ny))
i = np.indices((nx, ny))
i[0] = i[0] - cx
i[1] = i[1] - cy
i = i.astype(float)
i[0] *= 1.73205 / 2
if np.mod(cx, 2) == 0:
i[1][np.mod(cx + 1, 2) :: 2] += 0.5
else:
i[1][np.mod(cx + 1, 2) :: 2] -= 0.5
di = i[0] ** 2 + i[1] ** 2
for t1, t2 in params:
di = np.where(np.logical_and(di >= t2, di <= t1), 1, di)
di = np.where(di > 1.1, 0, di)
return di.transpose()
class toy_data:
    r"""Object that contains a set of toy images of randomly scattered
    hexagonal shapes of a certain kind.

    Args:
        shape: str, choose from ...
        nx: int, dimension in x
        ny: int, dimension in y
        nchannels: int, number of input channels ('colour' channels)
        nexamples: int, number of images
        px: int, center row for shape
        py: int, center column for shape
    """
    def __init__(
        self,
        shape,
        nx=16,
        ny=16,
        nchannels=1,
        nexamples=1,
        noisy=None,
        px=None,
        py=None,
    ):
        # Lookup table of ring parameters (t1, t2) consumed by put_shape.
        self.shapes = {
            "small_hexagon": [(1, 0)],
            "medium_hexagon": [(4, 0)],
            "snowflake_1": [(3, 0)],
            "snowflake_2": [(1, 0), (4.1, 3.9)],
            "snowflake_3": [(7, 3)],
            "snowflake_4": [(7, 0)],
            "double_hex": [(10, 5)],
        }
        self.nx = nx
        self.ny = ny
        # Start from Gaussian noise (std = noisy) or from zeros.
        if noisy:
            self.image_data = np.random.normal(0, noisy, (nexamples, nchannels, ny, nx))
        else:
            self.image_data = np.zeros((nexamples, nchannels, ny, nx))
        for ie, example in enumerate(self.image_data):
            for ic, channel in enumerate(example):
                # Random centre unless px and py are given.
                # NOTE(review): `not px and not py` also treats px=0 / py=0
                # as "unset", and cx is drawn from range ny while cy is drawn
                # from range nx — confirm this is intended for nx != ny.
                if not px and not py:
                    cx, cy = int(ny * np.random.random()), int(nx * np.random.random())
                else:
                    cx, cy = px, py
                # Draw one shape per channel and add it onto the image.
                face = put_shape(self.nx, self.ny, cx, cy, self.shapes[shape])
                self.image_data[ie, ic, :, :] += face
    def to_h5(self, filename):
        # Dump the image stack to <filename>.h5.
        # NOTE(review): h5py is not imported at the top of this file, so this
        # method would raise NameError as written — confirm the missing import.
        f = h5py.File(filename + ".h5", "w")
        f.create_dataset("image_data", data=self.image_data)
    def to_torch_tensor(self):
        # Return the image stack as a torch tensor.
        return torch.Tensor(self.image_data)
###################################################################
class Shape(object):
    """Generator of hexagonal-grid images containing the outline of a circle,
    triangle or square at a random position (and optional random rotation)."""
    def __init__(self, nx, ny, scale=3, rotation=False):
        # nx, ny: grid dimensions; scale: shape radius in pixel units;
        # rotation: if True, __call__ applies a random rotation to the shape.
        self.nx = nx
        self.ny = ny
        # Cartesian coordinates of every hexagonal pixel centre: columns are
        # sqrt(3)/2 apart and every odd column is shifted down half a pixel.
        self.X = np.zeros(self.nx * self.ny)
        self.Y = np.zeros(self.nx * self.ny)
        i = 0
        for x in range(self.nx):
            for y in range(self.ny):
                self.X[i], self.Y[i] = x * np.sqrt(3) / 2, -(y + np.mod(x, 2) * 0.5)
                i += 1
        self.xmin = np.min(self.X)
        self.xmax = np.max(self.X)
        self.ymin = np.min(self.Y)
        self.ymax = np.max(self.Y)
        # (npixels, 2) array of pixel centre positions.
        self.P = np.stack([self.X.flatten(), self.Y.flatten()], axis=1)
        self.size = 0.5  # half-thickness of the drawn outline
        self.scale = scale
        self.rotation = rotation
    def polar_to_cartesian(self, r, alpha):
        """Convert polar coordinates (r, alpha) to a cartesian [x, y] array."""
        x = r * np.cos(alpha)
        y = r * np.sin(alpha)
        return np.array([x, y])
    def image_from_points(self, point_list_1, point_list_2):
        """Rasterise the segments point_list_1[i] -> point_list_2[i] and
        return a flat 0/1 array over all pixels."""
        ind = np.full(len(self.P), False)
        for p1, p2 in zip(point_list_1, point_list_2):
            # Rotate pixel positions into the segment's frame; the |y|
            # component is then the perpendicular distance to the line.
            pa = p2 - p1
            alpha = np.arctan2(pa[1], pa[0])
            pb = self.P - p1
            beta = np.arctan2(pb[:, 1], pb[:, 0])
            vlen = LA.norm(pb, axis=1)
            dist = np.abs(self.polar_to_cartesian(vlen, beta - alpha)[1])
            tmp = np.where(dist < self.size, True, False)
            # Clip to the segment's bounding box so only the segment, not the
            # infinite line, is drawn; a degenerate extent (< 1e-12) leaves
            # that axis unconstrained.
            xmin = np.min([p1[0], p2[0]])
            xmax = np.max([p1[0], p2[0]])
            if np.abs(xmax - xmin) > 1e-12:
                xborder1 = np.where(self.P[:, 0] < xmin, False, True)
                xborder2 = np.where(self.P[:, 0] > xmax, False, True)
                xborder = np.logical_and(xborder1, xborder2)
            else:
                xborder = np.full(len(self.P), True)
            ymin = np.min([p1[1], p2[1]])
            ymax = np.max([p1[1], p2[1]])
            if np.abs(ymax - ymin) > 1e-12:
                yborder1 = np.where(self.P[:, 1] < ymin, False, True)
                yborder2 = np.where(self.P[:, 1] > ymax, False, True)
                yborder = np.logical_and(yborder1, yborder2)
            else:
                yborder = np.full(len(self.P), True)
            border = np.logical_and(xborder, yborder)
            tmp = np.logical_and(tmp, border)
            # Accumulate pixels hit by any segment.
            ind = np.logical_or(ind, tmp)
        return np.where(ind, 1, 0)
    def point_list_for_triangle(self, centre, rotation=0.0):
        """Corner pairs for an equilateral triangle of circumradius self.scale."""
        a1, a2, a3 = -np.pi / 6, np.pi / 2, np.pi * 7 / 6
        P1 = self.polar_to_cartesian(self.scale, a1 + rotation) + centre
        P2 = self.polar_to_cartesian(self.scale, a2 + rotation) + centre
        P3 = self.polar_to_cartesian(self.scale, a3 + rotation) + centre
        return [P1, P2, P3], [P2, P3, P1]
    def point_list_for_square(self, centre, rotation=0.0):
        """Corner pairs for a square of circumradius self.scale."""
        a1, a2, a3, a4 = np.pi / 4, np.pi * 3 / 4, -np.pi * 3 / 4, -np.pi / 4
        P1 = self.polar_to_cartesian(self.scale, a1 + rotation) + centre
        P2 = self.polar_to_cartesian(self.scale, a2 + rotation) + centre
        P3 = self.polar_to_cartesian(self.scale, a3 + rotation) + centre
        P4 = self.polar_to_cartesian(self.scale, a4 + rotation) + centre
        return [P1, P2, P3, P4], [P2, P3, P4, P1]
    def image_triangle(self, centre, rotation):
        """Flat 0/1 image of a triangle outline."""
        p1, p2 = self.point_list_for_triangle(centre, rotation)
        return self.image_from_points(p1, p2)
    def image_square(self, centre, rotation):
        """Flat 0/1 image of a square outline."""
        p1, p2 = self.point_list_for_square(centre, rotation)
        return self.image_from_points(p1, p2)
    def image_circle(self, centre):
        """Flat 0/1 image of a circle outline (pixels near radius self.scale)."""
        dist = np.abs(np.linalg.norm(self.P - centre, axis=1) - self.scale)
        return np.where(dist < self.size, 1, 0)
    def __call__(self, shape="circle"):
        """Return an (ny, nx) 0/1 image of `shape` at a random position,
        or None for an unknown shape name."""
        # Random centre inside the grid's bounding box.
        x = self.xmin + (self.xmax - self.xmin) * np.random.rand()
        y = self.ymin + (self.ymax - self.ymin) * np.random.rand()
        if self.rotation:
            r = 2 * np.pi * np.random.rand()
        else:
            r = 0.0
        if shape == "circle":
            centre = np.array([[x, y]])
            return self.image_circle(centre).reshape((self.nx, self.ny)).T
        elif shape == "triangle":
            centre = np.array([x, y])
            return (
                self.image_triangle(centre, r + np.pi / 7.5)
                .reshape((self.nx, self.ny))
                .T
            )
        elif shape == "square":
            centre = np.array([x, y])
            return (
                self.image_square(centre, r + np.pi / 3).reshape((self.nx, self.ny)).T
            )
        else:
            return None
class toy_data2:
    r"""Object that contains a set of toy images of randomly scattered
    hexagonal shapes of a certain kind.

    Args:
        shape: str, name of the shape to draw (e.g. "circle", "triangle", "square")
        nx: int, dimension in x
        ny: int, dimension in y
        nchannels: int, number of input channels ('colour' channels)
        nexamples: int, number of images
        noisy: optional float, std-dev of Gaussian background noise
            (zero background when falsy)
    """
    # NOTE(review): toy_dataset.create() instantiates a class named
    # `toy_data`; confirm which public name is intended for this class.
    def __init__(self, shape, nx=16, ny=16, nchannels=1, nexamples=1, noisy=None):
        self.nx = nx
        self.ny = ny
        # Shared random-shape rasterizer; rotation enabled, scale tied to grid size.
        self.shape = Shape(nx, ny, (nx + ny) / 6, True)
        if noisy:
            self.image_data = np.random.normal(0, noisy, (nexamples, nchannels, ny, nx))
        else:
            self.image_data = np.zeros((nexamples, nchannels, ny, nx))
        # Each channel receives its own independently drawn shape.
        for ie, example in enumerate(self.image_data):
            for ic, channel in enumerate(example):
                self.image_data[ie, ic, :, :] += self.shape(shape)
    def to_h5(self, filename):
        """Write the image stack to `<filename>.h5` under the key 'image_data'."""
        # Context manager ensures the file is flushed and closed (the previous
        # implementation left the handle open).
        with h5py.File(filename + ".h5", "w") as f:
            f.create_dataset("image_data", data=self.image_data)
    def to_torch_tensor(self):
        """Return the images as a torch.Tensor."""
        return torch.Tensor(self.image_data)
class toy_dataset:
    r"""Object that creates a data set containing different shapes

    Args:
        shapes: list of strings with names of different shapes
        nperclass: int, number of images of each shape
        nx: int, number of columns of pixels
        ny: int, number of rows of pixels
        nchannels: int, number of channels for each image
        noisy: optional float, std-dev of Gaussian background noise
    """
    def __init__(self, shapes, nperclass, nx=16, ny=16, nchannels=1, noisy=None):
        self.shapes = shapes
        self.image_data = np.zeros((len(shapes) * nperclass, nchannels, ny, nx))
        self.labels = np.zeros(len(shapes) * nperclass)
        self.nx = nx
        self.ny = ny
        self.nchannels = nchannels
        self.nperclass = nperclass
        self.noisy = noisy
        # Filled by convert_to_square().
        self.square_image_data = None
        self.square_benchmark = None
    def create(self):
        """Generate nperclass images per shape and shuffle them (with their
        integer class labels) into image_data / labels."""
        d = [
            toy_data(
                shape, self.nx, self.ny, self.nchannels, self.nperclass, self.noisy
            )
            for shape in self.shapes
        ]
        indices = np.arange(len(self.shapes) * self.nperclass)
        np.random.shuffle(indices)
        icount = 0
        # `np.int` was removed in NumPy 1.24; the builtin int is the
        # documented replacement and is behaviorally identical here.
        for s, label in zip(d, np.arange(len(self.shapes), dtype=int)):
            for image in s.image_data:
                for ic, c in enumerate(image):
                    self.image_data[indices[icount], ic] = c
                self.labels[indices[icount]] = int(label)
                icount += 1
    def convert_to_square(self, scale=1, method="linear"):
        """Resample the hexagonal-grid images onto a square grid of size
        (scale*ny, scale*nx) via scipy griddata; each image is rescaled to
        [0, 1].  The wall-clock cost is stored in square_benchmark."""
        t0 = time.time()
        # Cartesian coordinates of the hexagonal pixel centres: odd columns
        # are shifted half a pixel vertically.
        X = np.zeros(self.nx * self.ny)
        Y = np.zeros(self.nx * self.ny)
        i = 0
        for x in range(self.nx):
            for y in range(self.ny):
                X[i], Y[i] = x * np.sqrt(3) / 2, -(y + np.mod(x, 2) * 0.5)
                i += 1
        grid_x, grid_y = np.meshgrid(
            np.linspace(0, max(X), scale * self.nx),
            np.linspace(0, min(Y), scale * self.ny),
        )
        self.square_image_data = np.zeros(
            (
                len(self.shapes) * self.nperclass,
                self.nchannels,
                scale * self.ny,
                scale * self.nx,
            )
        )
        for ie, example in enumerate(self.image_data):
            for ic, image in enumerate(example):
                Z = image[:].flatten("F")
                tmp = griddata((X, Y), Z, (grid_x, grid_y), method=method)
                # Normalize to [0, 1], treating NaNs (outside the convex
                # hull) as zero.
                tmp -= np.nan_to_num(tmp).min()
                tmp /= np.nan_to_num(tmp).max()
                tmp = np.nan_to_num(tmp)
                self.square_image_data[ie, ic, :, :] += tmp
        self.square_benchmark = time.time() - t0
    def to_torch_tensor(self, sampling="hexagon"):
        """Return images as a torch.Tensor; sampling='square' selects the
        resampled images produced by convert_to_square()."""
        if sampling == "square":
            return torch.Tensor(self.square_image_data)
        else:
            return torch.Tensor(self.image_data)
    def to_dataloader(self, batchsize=8, shuffle=True, sampling="hexagon"):
        """Wrap (images, labels) in a torch DataLoader.

        Raises AssertionError if sampling='square' is requested before
        convert_to_square() has been called.
        """
        if sampling == "square":
            assert (
                self.square_image_data is not None
            ), "No square images, please convert first!"
            image_data = self.square_image_data
        else:
            image_data = self.image_data
        data, label = torch.from_numpy(image_data), torch.from_numpy(self.labels)
        tensor_dataset = torch.utils.data.TensorDataset(data, label)
        dataloader = torch.utils.data.DataLoader(
            tensor_dataset,
            batch_size=batchsize,
            shuffle=shuffle,
            # os.sysconf is POSIX-only; use half the online CPUs, at least 1.
            num_workers=max(1, os.sysconf("SC_NPROCESSORS_ONLN") // 2),
        )
        return dataloader
class model:
    r"""A toy model CNN

    Args:
        train_dataloader: pytorch dataloader with training data
        val_dataloader: pytorch dataloader with validation data
        net: CNN model
        epochs: int, number of epochs to train
    """
    def __init__(self, train_dataloader, val_dataloader, net, epochs=10):
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.net = net
        self.epochs = epochs
    def train(self, lr=0.005):
        """Train the network with SGD, recording train/val learning curves.

        Per epoch, the dataloaders are traversed three times:
        (train_dataloader, net.train(), "training"),
        (train_dataloader, net.eval(), "train_lc"),
        (val_dataloader, net.eval(), "val_lc").
        Statistics are logged every `nbts` batches; the LR scheduler steps on
        the running validation accuracy.
        """
        # Number of batches between two logging/bookkeeping points.
        nbts = 16
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(
            self.net.parameters(), lr=lr, momentum=0.9, weight_decay=0.004
        )
        # Learning-curve buffers (epoch, loss, accuracy, learning rate).
        self.tepoch = []
        self.tloss = []
        self.taccu = []
        self.tlr = []
        self.vepoch = []
        self.vloss = []
        self.vaccu = []
        self.train_time = 0
        # Halve the LR when accuracy stops improving by >1 percentage point.
        self.scheduler = scheduler.ReduceLROnPlateau(
            optimizer,
            mode="max",
            factor=0.5,
            patience=10,
            verbose=False,
            threshold=1,
            threshold_mode="abs",
            min_lr=1e-10,
        )
        for epoch in range(self.epochs):
            print("Epoch %d" % (epoch + 1))
            if torch.cuda.is_available():
                self.net = self.net.cuda()
            for dataloader, net_phase, phase in zip(
                [self.train_dataloader, self.train_dataloader, self.val_dataloader],
                ["train", "eval", "eval"],
                ["training", "train_lc", "val_lc"],
            ):
                if net_phase == "train":
                    # Time only the actual training pass.
                    t0 = time.time()
                num_batches = len(dataloader)
                running_loss = 0.0
                total = 0.0
                correct = 0.0
                batch_counter = 0.0
                # Switch the network to train() or eval() mode by name.
                getattr(self.net, net_phase)()
                for i, data in enumerate(dataloader, 0):
                    inputs, labels = data
                    inputs, labels = Variable(inputs).float(), Variable(labels).long()
                    if torch.cuda.is_available():
                        inputs, labels = inputs.cuda(), labels.cuda()
                    # NOTE(review): backward() and step() run in EVERY phase,
                    # including the eval passes over the train and validation
                    # sets — confirm the weight updates during "train_lc" and
                    # "val_lc" are intentional.
                    optimizer.zero_grad()
                    outputs = self.net(inputs)
                    tloss = criterion(outputs, labels)
                    tloss.backward()
                    optimizer.step()
                    running_loss += tloss.item()
                    total += outputs.data.size()[0]
                    _, predicted = torch.max(outputs.data, 1)
                    correct += (predicted == labels.data).sum()
                    if i % nbts == nbts - 1:
                        # Fractional epoch position for plotting.
                        current_epoch = epoch + (batch_counter + 1) / num_batches
                        current_lr = optimizer.param_groups[0]["lr"]
                        mean_loss = running_loss / nbts
                        mean_accuracy = 100 * correct.float() / total
                        print(
                            "epoch: %d (%.3f) %s - %5d batches -> mean loss: %.3f, lr: %.3f, mean acc.: %.2f %%"
                            % (
                                epoch + 1,
                                current_epoch,
                                phase,
                                i + 1,
                                mean_loss,
                                current_lr,
                                mean_accuracy,
                            )
                        )
                        # Reset windowed statistics after each report.
                        running_loss = 0.0
                        total = 0.0
                        correct = 0.0
                        if phase == "train_lc":
                            self.tepoch.append(current_epoch)
                            self.tloss.append(mean_loss)
                            self.taccu.append(mean_accuracy)
                            self.tlr.append(current_lr)
                        elif phase == "val_lc":
                            self.vepoch.append(current_epoch)
                            self.vloss.append(mean_loss)
                            self.vaccu.append(mean_accuracy)
                            # Scheduler is driven by validation accuracy only.
                            self.scheduler.step(mean_accuracy)
                    batch_counter += 1.0
                batch_counter = 0.0
                if net_phase == "train":
                    self.train_time += time.time() - t0
        # Average training time per epoch.
        self.train_time /= self.epochs
    def save_current(self):
        """Save the network's state_dict as <ClassName>_<epochs>.ptmodel."""
        torch.save(
            self.net.state_dict(),
            str(self.net.__class__.__name__) + "_" + str(self.epochs) + ".ptmodel",
        )
    def load(self, filename):
        """Load a previously saved state_dict into the network."""
        self.net.load_state_dict(torch.load(filename))
    def get_lc(self):
        """Return the recorded learning curves as numpy arrays:
        (tepoch, tloss, taccu, vepoch, vloss, vaccu, train_time)."""
        return (
            np.array(self.tepoch),
            np.array(self.tloss),
            np.array(self.taccu),
            np.array(self.vepoch),
            np.array(self.vloss),
            np.array(self.vaccu),
            np.array(self.train_time),
        )
    def plot_lc(self, scale_to_time=False):
        """Plot validation accuracy, validation loss, and learning rate.

        With scale_to_time=True the x axis is wall-clock training time
        instead of epochs.
        """
        fig = plt.figure("learning_curves", (7, 7))
        axa = fig.add_subplot(311)
        axb = fig.add_subplot(312)
        axc = fig.add_subplot(313)
        tx_axis = np.array(self.tepoch)
        vx_axis = np.array(self.vepoch)
        if scale_to_time:
            tx_axis *= self.train_time
            vx_axis *= self.train_time
        axa.plot(vx_axis, self.vaccu, "-", lw=1)
        axa.set_ylabel("accuracy [%]", size=15)
        axa.tick_params(
            axis="both",
            which="both",
            labelsize=10,
            bottom=False,
            top=False,
            labelbottom=False,
        )
        axb.plot(vx_axis, self.vloss, "-", label=self.net.name, lw=1)
        axb.legend()
        axb.set_ylabel("loss", size=15)
        axb.tick_params(
            axis="both",
            which="both",
            labelsize=10,
            bottom=False,
            top=False,
            labelbottom=False,
        )
        axc.plot(tx_axis, self.tlr, lw=1)
        axc.set_yscale("log")
        axc.set_ylabel("learning rate", size=15)
        if scale_to_time:
            axc.set_xlabel("train time [s]", size=15)
        else:
            axc.set_xlabel("# Epochs", size=15)
        axc.tick_params(
            axis="both",
            which="both",
            labelsize=10,
            bottom=True,
            top=True,
            labelbottom=True,
        )
        fig.canvas.draw()
        plt.show()
| 15,813 | -1 | 747 |
ec341a894798ee4489508adfa9e19d8e1399afde | 653 | py | Python | Practice/Mo's Algo_1.py | rajansh87/Algorithms-Implementations | 1f3dd1bc2decf10638fe0fdeeede47a650a9057b | [
"MIT"
] | 1 | 2020-05-10T19:01:51.000Z | 2020-05-10T19:01:51.000Z | Practice/Mo's Algo_1.py | rajansh87/Algorithms-Implementations | 1f3dd1bc2decf10638fe0fdeeede47a650a9057b | [
"MIT"
] | 9 | 2021-03-17T18:10:18.000Z | 2021-03-29T19:35:06.000Z | Practice/Mo's Algo_1.py | rajansh87/Data-Structures-and-Algorithms-Implementations | 0529079fbcd4d1a047210e9f2ff42c194c0818fe | [
"MIT"
] | null | null | null | # sum of elements in given range
# Offline range-sum queries: queries are sorted by right endpoint and answered
# with a sliding window [curL, curR) whose sum curS is updated incrementally
# (Mo's-algorithm-style pointer movement; note a true Mo ordering would sort
# by (block of l, r) — sorting by r alone is still correct, just a different
# complexity trade-off).
arr=list(map(int,input().split()))
m=int(input("query size: "))
query=[]
for i in range(m):
    l,r=map(int,input().split())
    query.append([l,r])
query.sort(key=lambda x:x[1])
# Invariant: curS == sum(arr[curL:curR]).
curL,curR,curS=0,0,0
for i in range(len(query)):
    l,r=query[i]
    while curL<l: #move left edge right: drop arr[curL]
        curS-=arr[curL]
        curL+=1
    while curL>l: #move left edge left: add arr[curL-1]
        curS+=arr[curL-1]
        curL-=1
    while curR<=r: #move right edge right: add arr[curR]
        curS+=arr[curR]
        curR+=1
    while curR>r+1: #move right edge left: drop arr[curR-1]
        curS-=arr[curR-1]
        curR-=1
    print("sum of ",query[i],": ",curS)
| 21.064516 | 39 | 0.529862 | # sum of elements in given range
# Offline range-sum queries answered with a sliding window arr[lo:hi]
# whose running sum is maintained incrementally; queries are processed
# in order of increasing right endpoint.
arr = list(map(int, input().split()))
m = int(input("query size: "))
query = []
for _ in range(m):
    l, r = map(int, input().split())
    query.append([l, r])
query.sort(key=lambda q: q[1])
lo, hi, running = 0, 0, 0  # invariant: running == sum(arr[lo:hi])
for q in query:
    left, right = q
    while lo < left:       # drop elements from the left edge
        running -= arr[lo]
        lo += 1
    while lo > left:       # extend the window to the left
        lo -= 1
        running += arr[lo]
    while hi <= right:     # extend the window to the right
        running += arr[hi]
        hi += 1
    while hi > right + 1:  # drop elements from the right edge
        hi -= 1
        running -= arr[hi]
    print("sum of ", q, ": ", running)
| 0 | 0 | 0 |
cee5f59982d776bc1eb45cd99f5023af14e334bd | 3,787 | py | Python | wandbox/__nim__.py | srz-zumix/wandbox-api | 009a5080f8b10068c203dce0b625dd9b38d9b046 | [
"MIT"
] | 6 | 2017-03-16T15:09:08.000Z | 2022-01-11T02:19:36.000Z | wandbox/__nim__.py | srz-zumix/wandbox-api | 009a5080f8b10068c203dce0b625dd9b38d9b046 | [
"MIT"
] | 30 | 2020-04-20T12:21:28.000Z | 2022-01-23T13:58:57.000Z | wandbox/__nim__.py | srz-zumix/wandbox-api | 009a5080f8b10068c203dce0b625dd9b38d9b046 | [
"MIT"
] | 2 | 2020-04-20T13:28:38.000Z | 2020-05-30T11:26:02.000Z | import re
import os
from .cli import CLI
from .runner import Runner
from .__cxx__ import CxxRunner
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 34.743119 | 109 | 0.581991 | import re
import os
from .cli import CLI
from .runner import Runner
from .__cxx__ import CxxRunner
class NimRunner(Runner):
    """Wandbox runner for Nim sources.

    Scans a source file for local module imports and for C/C++ headers pulled
    in via ``{.header: ...}`` pragmas, and bundles every referenced local
    file together with the main source.
    """
    # `import foo` / `import foo except bar`
    IMPORT_REGEX = re.compile(r'^\s*import\s*(.*?)(\s*except\s*.*|)$')
    # `from foo import bar`
    FROM_IMPORT_REGEX = re.compile(r'^\s*from\s*(\S*?)\s*import\s*(.*?)$')
    # proc pragma referencing a C header: `proc ... {... .header: "x.h" ...}`
    C_PROC_REGEX = re.compile(r'^\s*proc.*{.*\.header\s*:\s*([\'"].*[\'"]).*}\s*$')
    # `{.push ... header: "x.h" ...}` block pragma
    PUSH_HEADER_REGEX = re.compile(r'^\s*{\.push\s*.*header\s*:\s*([\'"].*[\'"]).*}$')
    def __init__(self, lang, compiler, save, encoding, retry, retry_wait, prefix_chars='-'):
        super(NimRunner, self).__init__(lang, compiler, save, encoding, retry, retry_wait, prefix_chars)
        # C/C++ header collection is delegated to the C++ runner.
        self.cxx = CxxRunner(lang, compiler, save, encoding, retry, retry_wait, prefix_chars)
    def reset(self):
        # NOTE(review): self.imports only exists after reset(); make_code
        # relies on it, so reset() is presumably invoked by the Runner base
        # before any scan — confirm.
        self.imports = []
        self.cxx.reset()
    def make_code(self, file, filepath, filename):
        """Collect `filename` plus every local file it references.

        Returns a dict mapping file names to file contents; the main source
        is stored under `filename`.
        """
        files = dict()
        code = ''
        # For each (comment-stripped) line, the first matching pattern wins.
        matchers = (
            (self.IMPORT_REGEX, self.get_imports),
            (self.FROM_IMPORT_REGEX, self.get_from_imports),
            (self.C_PROC_REGEX, self.get_c_header),
            (self.PUSH_HEADER_REGEX, self.get_c_header),
        )
        for line in file:
            codeline = re.sub(r'\s*#.*$', '', line)
            for regex, handler in matchers:
                m = regex.match(codeline)
                if m:
                    files.update(handler(m, filepath))
                    break
            code += line
        files[filename] = code
        return files
    def get_c_header(self, m, filepath):
        """Resolve a header-pragma match to the header file's contents."""
        header = m.group(1).strip('\'"')
        return self.get_c(os.path.dirname(filepath), header)
    def get_c(self, path, filename):
        """Open a local C/C++ header (if it exists and was not seen before)."""
        module_path = os.path.normpath(os.path.join(path, filename))
        if os.path.exists(module_path):
            module_abspath = os.path.abspath(module_path)
            # NOTE(review): unlike get_import(), this never records
            # module_abspath in self.imports, so a header may be re-opened;
            # duplicates are harmless because results merge by file name.
            if module_abspath not in self.imports:
                return self.cxx.open_code(module_path, filename)
        return dict()
    def get_from_imports(self, m, filepath):
        """Handle `from x.y import ...`: bundle the module file x/y.nim."""
        module = m.group(1).strip('\'"')
        # str.split('.') always yields at least one element, so the previous
        # `len(...) == 0` branch was unreachable and has been removed.
        module_name = os.path.join(*module.split('.'))
        return self.get_import(os.path.dirname(filepath), module_name)
    def get_imports(self, m, filepath):
        """Handle `import a, b, c`: bundle each named local module."""
        files = dict()
        modules = m.group(1).strip('\'"')
        for module_name in modules.split(','):
            files.update(self.get_import(os.path.dirname(filepath), module_name.strip()))
        return files
    def get_import(self, path, module_name):
        """Open a local `<module_name>.nim` once, recursing via open_code."""
        module_file = module_name + '.nim'
        module_path = os.path.normpath(os.path.join(path, module_file))
        if os.path.exists(module_path):
            module_abspath = os.path.abspath(module_path)
            if module_abspath not in self.imports:
                self.imports.append(module_abspath)
                return self.open_code(module_path, module_file)
        return dict()
class NimCLI(CLI):
    """Command-line front end for running Nim sources on Wandbox."""
    def __init__(self, compiler=None):
        super(NimCLI, self).__init__('Nim', compiler, False)
    def get_runner(self, args, options):
        """Build a NimRunner from the parsed command-line arguments."""
        runner = NimRunner(args.language, args.compiler, args.save, args.encoding, args.retry, args.retry_wait)
        return runner
def nim(compiler=None):
    """Run the Nim CLI, optionally pre-selecting a compiler."""
    NimCLI(compiler).execute()
def main():
    """Console-script entry point."""
    nim()
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| 2,963 | 534 | 146 |
5ffc09ccc88b108f92c7dd0b1f9a1a5c812f7d30 | 3,861 | py | Python | src/bitpay/models/subscription/subscription.py | ppatidar2021/python-bitpay-client | b0bd6ef6f2ce2950fb42e6583113e225639a4340 | [
"MIT"
] | 3 | 2022-01-24T17:02:22.000Z | 2022-03-10T00:57:20.000Z | src/bitpay/models/subscription/subscription.py | ppatidar2021/python-bitpay-client | b0bd6ef6f2ce2950fb42e6583113e225639a4340 | [
"MIT"
] | 1 | 2022-03-08T03:11:09.000Z | 2022-03-09T12:51:13.000Z | src/bitpay/models/subscription/subscription.py | ppatidar2021/python-bitpay-client | b0bd6ef6f2ce2950fb42e6583113e225639a4340 | [
"MIT"
] | 3 | 2021-12-24T05:57:33.000Z | 2022-03-14T09:17:40.000Z | """
Subscription: Subscriptions are repeat billing agreements with specific buyers.
BitPay sends bill emails to buyers identified in active subscriptions according
to the specified schedule.
"""
from .bill_data import BillData
from ...utils.key_utils import change_camel_case_to_snake_case
class Subscription:
    """
    Subscription
    A repeat billing agreement with a specific buyer; BitPay emails bills to
    the buyer according to the agreed schedule.
    """
    __id = None
    __status = None
    """
    BillData
    """
    __bill_data = None
    __schedule = None
    __next_delivery = None
    __created_date = None
    __token = None
    def get_id(self):
        """
        Get method for the id
        :return: id
        """
        return self.__id
    def set_id(self, id):
        """
        Set method for the id
        :param id: id
        """
        self.__id = id
    def get_status(self):
        """
        Get method for the status
        :return: status
        """
        return self.__status
    def set_status(self, status):
        """
        Set method for the status
        :param status: status
        """
        self.__status = status
    def get_bill_data(self):
        """
        Get method for the bill_data
        :return: bill_data
        """
        return self.__bill_data
    def set_bill_data(self, bill_data: BillData):
        """
        Set method for the bill_data
        :param bill_data: bill_data
        """
        self.__bill_data = bill_data
    def get_schedule(self):
        """
        Get method for the schedule
        :return: schedule
        """
        return self.__schedule
    def set_schedule(self, schedule):
        """
        Set method for the schedule
        :param schedule: schedule
        """
        self.__schedule = schedule
    def get_next_delivery(self):
        """
        Get method for the next_delivery
        :return: next_delivery
        """
        return self.__next_delivery
    def set_next_delivery(self, next_delivery):
        """
        Set method for the next_delivery
        :param next_delivery: next_delivery
        """
        self.__next_delivery = next_delivery
    def get_created_date(self):
        """
        Get method for the created_date
        :return: created_date
        """
        return self.__created_date
    def set_created_date(self, created_date):
        """
        Set method for the created_date
        :param created_date: created_date
        """
        self.__created_date = created_date
    def get_token(self):
        """
        Get method for the token
        :return: token
        """
        return self.__token
    def set_token(self, token):
        """
        Set method for the token
        :param token: token
        """
        self.__token = token
    def to_json(self):
        """
        :return: data in json
        """
        # NOTE(review): get_bill_data() may be None, in which case .to_json()
        # raises AttributeError — confirm callers always populate bill_data.
        data = {
            "id": self.get_id(),
            "status": self.get_status(),
            "billData": self.get_bill_data().to_json(),
            "schedule": self.get_schedule(),
            "nextDelivery": self.get_next_delivery(),
            "createdDate": self.get_created_date(),
            "token": self.get_token(),
        }
        # Falsy values (None, "", 0, empty containers) are dropped.
        data = {key: value for key, value in data.items() if value}
        return data
| 24.75 | 85 | 0.532246 | """
Subscription: Subscriptions are repeat billing agreements with specific buyers.
BitPay sends bill emails to buyers identified in active subscriptions according
to the specified schedule.
"""
from .bill_data import BillData
from ...utils.key_utils import change_camel_case_to_snake_case
class Subscription:
    """
    Subscription
    A repeat billing agreement with a specific buyer; BitPay emails bills to
    the buyer according to the agreed schedule.
    """
    __id = None
    __status = None
    """
    BillData
    """
    __bill_data = None
    __schedule = None
    __next_delivery = None
    __created_date = None
    __token = None
    def __init__(self, **kwargs):
        # Accepts camelCase API fields and routes each to its snake_case
        # setter; keys with no matching setter are silently ignored via the
        # swallowed AttributeError.
        for key, value in kwargs.items():
            try:
                if key in ["billData"]:
                    # Hydrate nested payloads into their model class
                    # (BillData), element-wise when the payload is a list.
                    klass = globals()[key[0].upper() + key[1:]]
                    if isinstance(value, list):
                        objs = []
                        for obj in value:
                            objs.append(klass(**obj))
                        value = objs
                    else:
                        value = klass(**value)
                getattr(self, "set_%s" % change_camel_case_to_snake_case(key))(value)
            except AttributeError:
                pass
    def get_id(self):
        """
        Get method for the id
        :return: id
        """
        return self.__id
    def set_id(self, id):
        """
        Set method for the id
        :param id: id
        """
        self.__id = id
    def get_status(self):
        """
        Get method for the status
        :return: status
        """
        return self.__status
    def set_status(self, status):
        """
        Set method for the status
        :param status: status
        """
        self.__status = status
    def get_bill_data(self):
        """
        Get method for the bill_data
        :return: bill_data
        """
        return self.__bill_data
    def set_bill_data(self, bill_data: BillData):
        """
        Set method for the bill_data
        :param bill_data: bill_data
        """
        self.__bill_data = bill_data
    def get_schedule(self):
        """
        Get method for the schedule
        :return: schedule
        """
        return self.__schedule
    def set_schedule(self, schedule):
        """
        Set method for the schedule
        :param schedule: schedule
        """
        self.__schedule = schedule
    def get_next_delivery(self):
        """
        Get method for the next_delivery
        :return: next_delivery
        """
        return self.__next_delivery
    def set_next_delivery(self, next_delivery):
        """
        Set method for the next_delivery
        :param next_delivery: next_delivery
        """
        self.__next_delivery = next_delivery
    def get_created_date(self):
        """
        Get method for the created_date
        :return: created_date
        """
        return self.__created_date
    def set_created_date(self, created_date):
        """
        Set method for the created_date
        :param created_date: created_date
        """
        self.__created_date = created_date
    def get_token(self):
        """
        Get method for the token
        :return: token
        """
        return self.__token
    def set_token(self, token):
        """
        Set method for the token
        :param token: token
        """
        self.__token = token
    def to_json(self):
        """
        :return: data in json
        """
        # NOTE(review): get_bill_data() may be None, in which case .to_json()
        # raises AttributeError — confirm callers always populate bill_data.
        data = {
            "id": self.get_id(),
            "status": self.get_status(),
            "billData": self.get_bill_data().to_json(),
            "schedule": self.get_schedule(),
            "nextDelivery": self.get_next_delivery(),
            "createdDate": self.get_created_date(),
            "token": self.get_token(),
        }
        # Falsy values (None, "", 0, empty containers) are dropped.
        data = {key: value for key, value in data.items() if value}
        return data
| 602 | 0 | 27 |
6a95395d3565ef2d706e106a3ada8672c0593121 | 541 | py | Python | python/ember/__init__.py | BangShiuh/ember | f0a70c7e01ae0dd7b5bd5ee70c8fc5d3f7207388 | [
"MIT"
] | 27 | 2016-11-22T08:29:48.000Z | 2021-12-01T12:15:39.000Z | python/ember/__init__.py | minhbau/ember | f0a70c7e01ae0dd7b5bd5ee70c8fc5d3f7207388 | [
"MIT"
] | 11 | 2015-02-12T14:12:33.000Z | 2021-04-15T15:53:03.000Z | python/ember/__init__.py | minhbau/ember | f0a70c7e01ae0dd7b5bd5ee70c8fc5d3f7207388 | [
"MIT"
] | 20 | 2016-05-15T04:51:36.000Z | 2022-01-26T09:07:35.000Z | import os.path as _path
import cantera
from ._ember import *
from . import _ember
from .input import *
from .output import *
from . import utils
__version__ = '1.4.0'
# Add Ember's data file directory to Cantera's search path. Because the Python
# module is statically linked to Cantera, this needs to be done separately for
# each of the two copies of the Cantera library that have been loaded.
_datapath = _path.join(_path.dirname(_path.abspath(__file__)), 'data')
_ember.addCanteraDirectory(_datapath)
cantera.add_directory(_datapath)
| 30.055556 | 78 | 0.778189 | import os.path as _path
import cantera
from ._ember import *
from . import _ember
from .input import *
from .output import *
from . import utils
__version__ = '1.4.0'
# Add Ember's data file directory to Cantera's search path. Because the Python
# module is statically linked to Cantera, this needs to be done separately for
# each of the two copies of the Cantera library that have been loaded.
_datapath = _path.join(_path.dirname(_path.abspath(__file__)), 'data')
# Copy linked statically into the _ember extension module.
_ember.addCanteraDirectory(_datapath)
# Copy loaded by the `cantera` Python package.
cantera.add_directory(_datapath)
| 0 | 0 | 0 |
8caed7bdf72c8d9b1fb3c833bc36f023ba4a193a | 2,728 | py | Python | slider-agent/src/test/python/agent/TestShell.py | pramodthangali/incubator-slider | 8434b8971af93fe316e4b4c9f6d4a2a917caedd0 | [
"Apache-2.0"
] | 1 | 2019-01-18T21:00:19.000Z | 2019-01-18T21:00:19.000Z | slider-agent/src/test/python/agent/TestShell.py | pramodthangali/incubator-slider | 8434b8971af93fe316e4b4c9f6d4a2a917caedd0 | [
"Apache-2.0"
] | null | null | null | slider-agent/src/test/python/agent/TestShell.py | pramodthangali/incubator-slider | 8434b8971af93fe316e4b4c9f6d4a2a917caedd0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import unittest
import tempfile
from mock.mock import patch, MagicMock, call
from agent import shell
from sys import platform as _platform
import subprocess, time
import sys
import platform
| 35.894737 | 107 | 0.728739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import unittest
import tempfile
from mock.mock import patch, MagicMock, call
from agent import shell
from sys import platform as _platform
import subprocess, time
import sys
import platform
class TestShell(unittest.TestCase):
    """Integration test for shell.kill_process_with_children (Linux only)."""
    # Linux distributions on which this test should be skipped.
    unsupported_for_test = []
    def linux_distribution(self):
        """Return the current Linux distribution tuple, using the API
        available for the running Python version."""
        PYTHON_VER = sys.version_info[0] * 10 + sys.version_info[1]
        if PYTHON_VER < 26:
            # platform.dist() is the pre-2.6 spelling of linux_distribution().
            linux_dist = platform.dist()
        else:
            linux_dist = platform.linux_distribution()
        return linux_dist
    def test_kill_process_with_children(self):
        """Spawn a tree of `sleep` processes spanning several process groups
        and verify kill_process_with_children removes them all."""
        dist = self.linux_distribution()
        operatingSystem = dist[0].lower()
        if operatingSystem in self.unsupported_for_test:
            return
        if _platform == "linux" or _platform == "linux2": # Test is Linux-specific
            # Shorten the graceful-kill delay so the test runs quickly.
            gracefull_kill_delay_old = shell.gracefull_kill_delay
            shell.gracefull_kill_delay = 0.1
            sleep_cmd = "sleep 10"
            test_cmd = """ (({0}) & ({0} & {0})) """.format(sleep_cmd)
            # Starting process tree (multiple process groups)
            test_process = subprocess.Popen(test_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
            time.sleep(0.3) # Delay to allow subprocess to start
            # Check if processes are running
            ps_cmd = """ps aux """
            ps_process = subprocess.Popen(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
            (out, err) = ps_process.communicate()
            # NOTE(review): `out` is bytes on Python 3 (no text mode was
            # requested), so `str in bytes` raises TypeError there — this
            # test presumably targets Python 2; confirm before running on 3.
            self.assertTrue(sleep_cmd in out)
            # Kill test process
            shell.kill_process_with_children(test_process.pid)
            test_process.communicate()
            # Now test process should not be running
            ps_process = subprocess.Popen(ps_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
            (out, err) = ps_process.communicate()
            self.assertFalse(sleep_cmd in out)
            shell.gracefull_kill_delay = gracefull_kill_delay_old
        else:
            # Do not run under other systems
            pass
| 1,600 | 93 | 23 |
fe878e67227214f872037684567ed0df2357ce71 | 832 | py | Python | setup.py | rec/recs | 24d545cfa12129dc6c413f0defad08bac6f5ff14 | [
"MIT"
] | 2 | 2019-05-26T15:09:37.000Z | 2019-06-15T10:18:45.000Z | setup.py | rec/recs | 24d545cfa12129dc6c413f0defad08bac6f5ff14 | [
"MIT"
] | null | null | null | setup.py | rec/recs | 24d545cfa12129dc6c413f0defad08bac6f5ff14 | [
"MIT"
] | null | null | null | from setuptools import setup
import recs
# Trove classifiers: supported Python versions, audience, and license.
_classifiers = [
    'Development Status :: 4 - Beta',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Topic :: Software Development :: Libraries',
    'Topic :: Utilities',
    ]
# Package metadata; the version is read from the module itself so it is
# defined in exactly one place.
setup(name='recs',
      version=recs.__version__,
      author='Tom Ritchford',
      author_email='tom@swirly.com',
      url='https://github.com/rec/recs',
      tests_require=['pytest'],
      py_modules=['recs'],
      description='Try to import all modules below a given root',
      long_description=open('README.rst').read(),
      license='MIT',
      classifiers=_classifiers,
      keywords=['testing', 'importing'],
      )
| 29.714286 | 65 | 0.627404 | from setuptools import setup
import recs
# Trove classifiers: supported Python versions, audience, and license.
_classifiers = [
    'Development Status :: 4 - Beta',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Topic :: Software Development :: Libraries',
    'Topic :: Utilities',
    ]
# Package metadata; the version is read from the module itself so it is
# defined in exactly one place.
setup(name='recs',
      version=recs.__version__,
      author='Tom Ritchford',
      author_email='tom@swirly.com',
      url='https://github.com/rec/recs',
      tests_require=['pytest'],
      py_modules=['recs'],
      description='Try to import all modules below a given root',
      long_description=open('README.rst').read(),
      license='MIT',
      classifiers=_classifiers,
      keywords=['testing', 'importing'],
      )
| 0 | 0 | 0 |
44679082923e024843dab607e9bd1340db00e86a | 1,850 | py | Python | tests/test_phone_numbers.py | robclewley/scrubadub | 465b5d50e6fc6fb3b3dde49dac5c7d95305a3f0c | [
"MIT"
] | 190 | 2015-12-03T01:31:36.000Z | 2020-09-02T23:46:38.000Z | tests/test_phone_numbers.py | robclewley/scrubadub | 465b5d50e6fc6fb3b3dde49dac5c7d95305a3f0c | [
"MIT"
] | 54 | 2020-09-10T14:46:14.000Z | 2022-03-10T06:03:00.000Z | tests/test_phone_numbers.py | robclewley/scrubadub | 465b5d50e6fc6fb3b3dde49dac5c7d95305a3f0c | [
"MIT"
] | 57 | 2016-04-04T18:37:38.000Z | 2020-08-18T22:59:03.000Z | import unittest
from base import BaseTestCase
| 30.327869 | 75 | 0.557297 | import unittest
from base import BaseTestCase
class PhoneNumberTestCase(unittest.TestCase, BaseTestCase):
    """Checks that phone numbers in free text are scrubbed to {{PHONE}}."""
    def create_docstring(self, phone_number):
        """Build a BEFORE/AFTER fixture string for one phone number."""
        return """
            BEFORE: My phone number is %s
            AFTER: My phone number is {{PHONE}}
        """ % phone_number
    def check_phone_numbers(self, *phone_numbers):
        """Run the BEFORE/AFTER comparison for each given number."""
        for phone_number in phone_numbers:
            self.compare_before_after(
                docstring=self.create_docstring(phone_number),
            )
    def test_american_phone_number(self):
        """test american-style phone numbers"""
        self.check_phone_numbers(
            '1-312-515-2239',
            '+1-312-515-2239',
            '1 (312) 515-2239',
            '312-515-2239',
            '(312) 515-2239',
            '(312)515-2239',
        )
    def test_extension_phone_numbers(self):
        """test phone numbers with extensions"""
        self.check_phone_numbers(
            '312-515-2239 x12',
            '312-515-2239 ext. 12',
            '312-515-2239 ext.12',
        )
    def test_international_phone_numbers(self):
        """test international phone numbers"""
        self.check_phone_numbers(
            '+47 21 30 85 99',
            '+45 69 19 88 56',
            '+46 852 503 499',
            '+31 619 837 236',
            '+86 135 3727 4136',
            '+61267881324',
        )
    def test_multiple_phone_numbers(self):
        """test that several numbers in one sentence are each scrubbed"""
        # Regression guard: running this through scrubadub.clean once
        # replaced 'reached at 312.714.8142' with '{{EMAIL}}' (reported on
        # the project issue tracker), so the expected output is asserted
        # exactly here.
        result = self.clean(
            u'Call me on my cell 312.714.8142 or in my office 773.415.7432'
        )
        self.assertEqual(
            result,
            u'Call me on my cell {{PHONE}} or in my office {{PHONE}}',
            'problem with multiple phone numbers: \n %s' % result,
        )
| 779 | 1,000 | 23 |
681206aed8dc1aa26d1177a01471710980f614c7 | 414 | py | Python | website/products/sitemaps.py | zckoh/ecommerce-fullstack | c4cecea3ebaec900da484954d01dcbc2cba325c9 | [
"Apache-2.0"
] | 1 | 2021-12-14T22:24:20.000Z | 2021-12-14T22:24:20.000Z | website/products/sitemaps.py | zckoh/ecommerce-fullstack | c4cecea3ebaec900da484954d01dcbc2cba325c9 | [
"Apache-2.0"
] | 11 | 2021-03-30T13:59:29.000Z | 2022-03-12T00:48:40.000Z | website/products/sitemaps.py | zckoh/ecommerce-fullstack | c4cecea3ebaec900da484954d01dcbc2cba325c9 | [
"Apache-2.0"
] | null | null | null | from django.contrib.sitemaps import Sitemap
from django.shortcuts import reverse
from .models import Product
| 21.789474 | 44 | 0.657005 | from django.contrib.sitemaps import Sitemap
from django.shortcuts import reverse
from .models import Product
class ProductSitemap(Sitemap):
    """Sitemap entry for the static product-listing page."""
    protocol = 'https'
    def items(self):
        # Named URL patterns to include; resolved by location() below.
        return ['products']
    def location(self, item):
        return reverse(item)
class AllProductsSitemap(Sitemap):
    """Sitemap with one entry per Product; Django derives each URL from the
    model's get_absolute_url()."""
    protocol = 'https'
    def items(self):
        return Product.objects.all()
| 91 | 159 | 48 |
f34a5b2defe52a683dcc06d84294c96ea1da392b | 481 | py | Python | AuthTest/CustomMiddleModule.py | zhangjiang1203/AuthModuleModel | 04a8ebcba131643595127c254098bc4cfd14dea8 | [
"MIT"
] | null | null | null | AuthTest/CustomMiddleModule.py | zhangjiang1203/AuthModuleModel | 04a8ebcba131643595127c254098bc4cfd14dea8 | [
"MIT"
] | null | null | null | AuthTest/CustomMiddleModule.py | zhangjiang1203/AuthModuleModel | 04a8ebcba131643595127c254098bc4cfd14dea8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-18 14:00
# @Author : 张江
# @Site :
# @File : CustomMiddleModule.py
# @Software: PyCharm
#添加自己的中间件 (add your own custom middleware here)
from django.utils.deprecation import MiddlewareMixin
# 可以在中间件中添加用户认证和登录设置等信息 | 24.05 | 52 | 0.683992 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-18 14:00
# @Author : 张江
# @Site :
# @File : CustomMiddleModule.py
# @Software: PyCharm
#添加自己的中间件 (add your own custom middleware here)
from django.utils.deprecation import MiddlewareMixin
# 可以在中间件中添加用户认证和登录设置等信息 (middleware can add user authentication, login configuration, and similar information)
class CustomMiddle(MiddlewareMixin):
    """Example custom Django middleware that logs every request/response."""
    def process_request(self, request):
        # Runs before the view; returning None continues normal processing.
        print('自定义中间件请求',request)
    def process_response(self, request, response):
        # Runs on the way out; must return the response object.
        print('自定义中间件响应',request,response)
        return response
653fa0e48b9de8ab2cb7131c8a04c498d0598f8f | 5,506 | py | Python | anvilfs/workspacebucket.py | anvilproject/fs.anvilfs | 781b11c0665bf056f90beef5412b79e7c3cb6bd1 | [
"Apache-2.0"
] | 3 | 2021-02-26T06:47:22.000Z | 2022-01-20T19:26:33.000Z | anvilfs/workspacebucket.py | anvilproject/fs.anvilfs | 781b11c0665bf056f90beef5412b79e7c3cb6bd1 | [
"Apache-2.0"
] | 10 | 2020-10-08T22:33:15.000Z | 2021-11-15T18:59:31.000Z | anvilfs/workspacebucket.py | anvilproject/fs.anvilfs | 781b11c0665bf056f90beef5412b79e7c3cb6bd1 | [
"Apache-2.0"
] | null | null | null | from io import BytesIO
from os import SEEK_END, SEEK_SET
import re
import gs_chunked_io as gscio
from .basefile import BaseAnVILFile
from .basefolder import BaseAnVILFolder
| 32.579882 | 96 | 0.592081 | from io import BytesIO
from os import SEEK_END, SEEK_SET
import re
import gs_chunked_io as gscio
from .basefile import BaseAnVILFile
from .basefolder import BaseAnVILFolder
class OtherDataFolder(BaseAnVILFolder):
    """Virtual "Other Data" folder: workspace attributes plus the raw GCS bucket."""
    def __init__(self, attributes, bucket_name):
        super().__init__("Other Data")
        self.bucket_name = bucket_name
        self.attributes = attributes
    def lazy_init(self):
        """Populate children on first access: a "Workspace Data" folder (only if
        any non-metadata attributes remain) and a "Files" folder for the bucket."""
        # clone it to delete from one while iterating over other
        workspacedata = dict(self.attributes)
        # Keys with these prefixes are workspace metadata, not user data.
        blocklist_prefixes = [
            "referenceData_",
            "description",
            "tag:"
        ]
        for datum in self.attributes:
            for blocked in blocklist_prefixes:
                if datum.startswith(blocked):
                    del workspacedata[datum]
        if workspacedata:
            wsdf = WorkspaceDataFolder(workspacedata)
            self[wsdf.name] = wsdf
        _wsb = WorkspaceBucket(self.bucket_name)
        self[_wsb.name] = _wsb
class WorkspaceDataFolder(BaseAnVILFolder):
    """Folder exposing workspace attributes and any files those attributes link to."""
    def __init__(self, workspacedata):
        super().__init__("Workspace Data")
        self.workspacedata = workspacedata
    def lazy_init(self):
        """Group attribute values by linkable file type, materialize each group
        through the type's factory, and always add a WorkspaceData.tsv dump."""
        files = {}
        for k in self.workspacedata:
            val = self.workspacedata[k]
            # is_linkable_file() presumably returns a file class (exposing
            # .factory) or None -- inherited from BaseAnVILFolder; confirm there.
            filetype = self.is_linkable_file(val)
            if filetype is not None:
                if filetype not in files:
                    files[filetype] = []
                files[filetype].append(val)
        linked_files = []
        for method in files:
            try:
                fresh_files = method.factory(files[method])
                linked_files.extend(fresh_files)
            except Exception as e:
                # Best-effort: one bad link must not break the whole folder.
                print("ERROR: SKIPPING FILE due to error:")
                print(e)
                continue
        linked_files.append(
            WorkspaceData("WorkspaceData.tsv", self.workspacedata))
        for f in linked_files:
            self[f.name] = f
class WorkspaceBucketSubFolder(BaseAnVILFolder):
    """A sub-directory inside the workspace's GCS bucket."""
    def __init__(self, name, bucketpath, bucket_name):
        self.bucket_name = bucket_name
        self.files = []
        # Path prefix within the bucket for this folder (ends with '/').
        self.bucketpath = bucketpath
        super().__init__(name)
    def lazy_init(self):
        # Children are inserted eagerly by WorkspaceBucket.insert_file().
        pass
    def upload(self, fname, read_file):
        """Stream `read_file` into the bucket at this folder's prefix + fname."""
        try:
            # NOTE(review): this looks up a *child entry* named "google_bucket"
            # but the except-branch assigns an *attribute*, so the lookup always
            # raises and the bucket handle is re-created on every call --
            # confirm intent against BaseAnVILFolder.__getitem__.
            self["google_bucket"]
        except KeyError:
            self.google_bucket = self.gc_storage_client.bucket(
                self.bucket_name)
        with gscio.Writer(self.bucketpath + fname, self.google_bucket) as gsw:
            # Chunked copy so large files never sit fully in memory.
            data = read_file.read(gsw.chunk_size)
            while data:
                gsw.write(data)
                data = read_file.read(gsw.chunk_size)
class WorkspaceBucket(BaseAnVILFolder):
    """The "Files" folder: mirrors the whole workspace GCS bucket as a tree."""
    def __init__(self, bucket_name):
        super().__init__("Files")
        self.bucket_name = bucket_name
        self.bucket_path = bucket_name + "/Other Data/"
    def lazy_init(self):
        """List every blob in the bucket and build the folder tree eagerly."""
        self.google_bucket = self.gc_storage_client.get_bucket(
            self.bucket_name)
        blobs = self.google_bucket.list_blobs()
        self.initialized = True
        for blob in blobs:
            self.insert_file(blob)
    def insert_file(self, bucket_blob):
        """Insert one blob, creating intermediate sub-folders along its path."""
        # name relative to the path from workspace bucket
        path = bucket_blob.name
        # handle subfolders like base folders -- dunno why google doesn't
        # e.g., list has a/b/ but not a/
        if path[-1] == "/":
            return
            #raise Exception(f"Files should be set, not folders: {path}")
        s = path.split("/")
        # march to terminal folder, creating along the way
        current = self
        for i in range(len(s)-1):
            subname = s[i]+'/'
            if subname not in current:
                dir_path = '/'.join(s[:i+1]) + '/'
                current[subname] = WorkspaceBucketSubFolder(subname, dir_path, self.bucket_name)
            current = current[subname]
        current[s[-1]] = WorkspaceBucketFile(bucket_blob)
    def upload(self, fname, read_file):
        """Stream `read_file` into the bucket root as `fname` (chunked)."""
        try:
            # NOTE(review): same child-entry vs. attribute lookup quirk as in
            # WorkspaceBucketSubFolder.upload -- the bucket handle is
            # re-created on every call.
            self["google_bucket"]
        except KeyError:
            self.google_bucket = self.gc_storage_client.bucket(
                self.bucket_name)
        with gscio.Writer(fname, self.google_bucket) as gsw:
            data = read_file.read(gsw.chunk_size)
            while data:
                gsw.write(data)
                data = read_file.read(gsw.chunk_size)
class WorkspaceBucketFile(BaseAnVILFile):
    """A file entry backed by a GCS blob; contents are streamed, not buffered."""
    def __init__(self, blob):
        self.name = blob.name.split("/")[-1]  # basename only
        self.size = blob.size
        self.last_modified = blob.updated
        self.blob_handle = blob
        self.is_dir = False
    def get_bytes_handler(self):
        # Chunked reader over the blob's contents.
        return gscio.Reader(self.blob_handle)
class WorkspaceData(BaseAnVILFile):
    """In-memory TSV file generated from a workspace-attribute dictionary."""

    def __init__(self, name, data_dict):
        self.name = name
        self.buffer = self._dict_to_buffer(data_dict)
        self.last_modified = None

    def _dict_to_buffer(self, d):
        """Serialize `d` to "key<TAB>value" lines; also records self.size in bytes."""
        # Only keys made entirely of word characters / hyphens are emitted.
        valid_key = re.compile("^[A-Za-z0-9_-]*$")
        rows = [f"{key}\t{d[key]}\n" for key in d if valid_key.match(key)]
        payload = "".join(rows).encode('utf-8')
        self.size = len(payload)
        # New BytesIO starts positioned at 0, ready for readers.
        return BytesIO(payload)

    def get_bytes_handler(self):
        """Return the shared in-memory buffer."""
        return self.buffer
| 4,641 | 119 | 564 |
719a7ec92140a49dcb3ff7ab37cd2451db2334c9 | 439 | py | Python | scripts/getLabelsForConceptUris.py | TobiasNx/fix-FunctionalReview-Testing | 6169332db22224ec64b6c3fe4ea4b31fa6a4c5c8 | [
"MIT"
] | null | null | null | scripts/getLabelsForConceptUris.py | TobiasNx/fix-FunctionalReview-Testing | 6169332db22224ec64b6c3fe4ea4b31fa6a4c5c8 | [
"MIT"
] | null | null | null | scripts/getLabelsForConceptUris.py | TobiasNx/fix-FunctionalReview-Testing | 6169332db22224ec64b6c3fe4ea4b31fa6a4c5c8 | [
"MIT"
] | null | null | null | import requests
import json
filepath = '../data/maps/edusharing-subject-mapping.tsv'
input = open(filepath).readlines()
for row in input:
if "https" in row:
uri = row.split('\t')[1].rstrip()
label = requests.get(uri, headers={"accept":"application/json"}).json()['prefLabel']['de'].encode('utf-8').strip()
with open('../data/maps/subject-labels.tsv', 'a') as f:
f.write(uri + "\t" + label + "\n") | 33.769231 | 122 | 0.605923 | import requests
import json

# For every mapping row containing a concept URI, fetch the concept's German
# prefLabel from the SKOS endpoint and append "<uri>\t<label>" to
# subject-labels.tsv.
filepath = '../data/maps/edusharing-subject-mapping.tsv'
# Fix: close the input file (the original leaked the handle) and avoid
# shadowing the builtin `input`.
with open(filepath, encoding='utf-8') as mapping_file:
    rows = mapping_file.readlines()
for row in rows:
    if "https" in row:
        uri = row.split('\t')[1].rstrip()
        # Fix: the original did .encode('utf-8') here and then concatenated
        # the resulting bytes with str below -- a TypeError on Python 3.
        # Keep the label as str and let open(..., encoding=...) handle bytes.
        response = requests.get(uri, headers={"accept": "application/json"})
        label = response.json()['prefLabel']['de'].strip()
        with open('../data/maps/subject-labels.tsv', 'a', encoding='utf-8') as f:
            f.write(uri + "\t" + label + "\n")
136545fce3e8cecf2001e7bdbd83ade947e69365 | 245 | py | Python | 7kyu/check_for_prime_numbers.py | nhsz/codewars | 82703959e910254d6feff4162f78c6dbd7a1c3ed | [
"MIT"
] | 1 | 2018-12-02T23:04:38.000Z | 2018-12-02T23:04:38.000Z | 7kyu/check_for_prime_numbers.py | nhsz/codewars | 82703959e910254d6feff4162f78c6dbd7a1c3ed | [
"MIT"
] | null | null | null | 7kyu/check_for_prime_numbers.py | nhsz/codewars | 82703959e910254d6feff4162f78c6dbd7a1c3ed | [
"MIT"
] | null | null | null | # http://www.codewars.com/kata/53daa9e5af55c184db00025f/
| 22.272727 | 56 | 0.522449 | # http://www.codewars.com/kata/53daa9e5af55c184db00025f/
def is_prime(n):
    """Return True when ``n`` is a prime number, False otherwise.

    Fixes over the original: drops the Python-2-only ``xrange``; removes the
    redundant ``n != div`` guard (div never reaches n); and stops trial
    division at sqrt(n) instead of n, turning O(n) into O(sqrt(n)).
    """
    if n <= 1:
        return False
    div = 2
    while div * div <= n:
        if n % div == 0:
            return False
        div += 1
    return True
| 165 | 0 | 23 |
a486d371c1af152079a6a939561c7002483891ad | 149 | py | Python | demo/test.py | aravindhk/Vides | 65d9ea9764ddf5f6ef40e869bd31387d0e3e378f | [
"BSD-4-Clause"
] | 2 | 2021-11-03T17:24:24.000Z | 2021-12-02T06:06:50.000Z | demo/test.py | aravindhk/Vides | 65d9ea9764ddf5f6ef40e869bd31387d0e3e378f | [
"BSD-4-Clause"
] | null | null | null | demo/test.py | aravindhk/Vides | 65d9ea9764ddf5f6ef40e869bd31387d0e3e378f | [
"BSD-4-Clause"
] | null | null | null | from NanoTCAD_ViDES import *
from numpy import genfromtxt
fi = genfromtxt("./datiout_idvds/idvds.out", delimiter = ' ')
plot(fi[:,0],fi[:,1])
show() | 24.833333 | 61 | 0.704698 | from NanoTCAD_ViDES import *
from numpy import genfromtxt
# Load the Id-Vds sweep written by the simulator -- presumably column 0 is
# Vds and column 1 is Id; confirm against the idvds.out writer -- and plot it.
fi = genfromtxt("./datiout_idvds/idvds.out", delimiter = ' ')
plot(fi[:,0],fi[:,1])  # plot/show come from the NanoTCAD_ViDES star import
show()
e765fb3f3635f387b5b8188b7acfcdc41c6bffec | 894 | py | Python | test/test_substitution.py | corneliusroemer/pyro-cov | 54e89d128293f9ff9e995c442f72fa73f5f99b76 | [
"Apache-2.0"
] | 22 | 2021-09-14T04:33:11.000Z | 2022-02-01T21:33:05.000Z | test/test_substitution.py | corneliusroemer/pyro-cov | 54e89d128293f9ff9e995c442f72fa73f5f99b76 | [
"Apache-2.0"
] | 7 | 2021-11-02T13:48:35.000Z | 2022-03-23T18:08:35.000Z | test/test_substitution.py | corneliusroemer/pyro-cov | 54e89d128293f9ff9e995c442f72fa73f5f99b76 | [
"Apache-2.0"
] | 6 | 2021-09-18T01:06:51.000Z | 2022-01-10T02:22:06.000Z | # Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import pyro.poutine as poutine
import pytest
import torch
from pyro.infer.autoguide import AutoDelta
from pyrocov.substitution import GeneralizedTimeReversible, JukesCantor69
@pytest.mark.parametrize("Model", [JukesCantor69, GeneralizedTimeReversible])
| 29.8 | 77 | 0.694631 | # Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import pyro.poutine as poutine
import pytest
import torch
from pyro.infer.autoguide import AutoDelta
from pyrocov.substitution import GeneralizedTimeReversible, JukesCantor69
@pytest.mark.parametrize("Model", [JukesCantor69, GeneralizedTimeReversible])
def test_matrix_exp(Model):
    """Check each substitution model's matrix_exp/log_matrix_exp against
    torch's reference matrix_exp, with guide parameters replayed."""
    model = Model()
    guide = AutoDelta(model)
    guide()  # warm up the guide so its parameters exist
    trace = poutine.trace(guide).get_trace()
    t = torch.randn(10).exp()  # 10 random positive time values
    with poutine.replay(trace=trace):
        m = model()
    # model() must be deterministic under the replayed trace
    assert torch.allclose(model(), m)
    # Reference: rate matrix scaled per-time, exponentiated by torch
    exp_mt = (m * t[:, None, None]).matrix_exp()
    actual = model.matrix_exp(t)
    assert torch.allclose(actual, exp_mt, atol=1e-6)
    actual = model.log_matrix_exp(t)
    log_exp_mt = exp_mt.log()
    assert torch.allclose(actual, log_exp_mt, atol=1e-6)
4542387fbb3b4014a1c19c862b65107f76ff164c | 6,667 | py | Python | range_repair.py | hancockks/cassandra_range_repair | 14e21826cd43e0df15e8f7f9d3e8842f5339bda8 | [
"MIT"
] | null | null | null | range_repair.py | hancockks/cassandra_range_repair | 14e21826cd43e0df15e8f7f9d3e8842f5339bda8 | [
"MIT"
] | null | null | null | range_repair.py | hancockks/cassandra_range_repair | 14e21826cd43e0df15e8f7f9d3e8842f5339bda8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import operator
import optparse
import os
import re
import subprocess
import sys
import datetime
#def lrange(num1, num2 = None, step = 1, format = format_murmur):
# offset = 0 if format == format_md5 else 2**63
# max = 2**127-1 if format == format_md5 else 2**63-1
# wrap = 2**128 if format == format_md5 else 2**64
#
# print "%d %d" % (num1+offset, num2+offset)
# while (num1 + offset < num2 + offset):
# yield num1
# num1 += step
# if num1 > max:
# num1 -= wrap
# for i in ring:
# if token > i:
# return i
#
# if is_murmur_ring(ring):
# return 2**63 - 1
#
# return 2**127 - 1
# for i in lrange(start + step_increment, stop + 1, step_increment, format):
# print "start = %d, i = %d" % (start, i)
# yield start, i
# start = i
if __name__ == '__main__':
main()
# success, ring_tokens, error = get_ring_tokens()
# success, host_token, error = get_host_token()
# range_termination = get_range_termination(host_token, ring_tokens)
# steps = 100
# print repr(is_murmur_ring(ring_tokens))
# print repr(get_ring_tokens())
# print repr(get_host_token())
# print repr(get_range_termination(host_token, ring_tokens))
# print repr(get_sub_range_generator(host_token, range_termination, steps).next())
| 29.763393 | 161 | 0.612719 | #!/usr/bin/env python
import operator
import optparse
import os
import re
import subprocess
import sys
import datetime
def format_murmur(i):
    """Zero-pad a Murmur3 token to the fixed 20-character field width."""
    return "{:020d}".format(i)
def format_md5(i):
    """Zero-pad an MD5 (RandomPartitioner) token to the fixed 39-character width."""
    return "{:039d}".format(i)
#def lrange(num1, num2 = None, step = 1, format = format_murmur):
# offset = 0 if format == format_md5 else 2**63
# max = 2**127-1 if format == format_md5 else 2**63-1
# wrap = 2**128 if format == format_md5 else 2**64
#
# print "%d %d" % (num1+offset, num2+offset)
# while (num1 + offset < num2 + offset):
# yield num1
# num1 += step
# if num1 > max:
# num1 -= wrap
def run_command(command, *args):
    """Run `command` (joined with `args`) through the shell.

    Returns (success, returncode, full_command, stdout, stderr).

    Fix: the original called proc.wait() before reading stdout/stderr, which
    can deadlock once a pipe buffer fills up; communicate() drains both pipes
    while waiting.  NOTE: shell=True means the arguments must be trusted.
    """
    cmd = " ".join([command] + list(args))
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return proc.returncode == 0, proc.returncode, cmd, stdout, stderr
def is_murmur_ring(ring):
    """A ring containing any negative token must be using the Murmur3 partitioner."""
    return any(token < 0 for token in ring)
def get_time():
    """Current local time as a 'YYYY-MM-DD HH:MM:SS.mmm' string.

    Fix: the original called the Python-2-only ``unicode()`` builtin (a
    NameError on Python 3); ``str()`` produces the same text on both.
    """
    # lazy, but efficient... chop microseconds down to 3 decimals
    return str(datetime.datetime.now())[:-3]
def get_keyspaces():
    """Collect keyspace names from `nodetool cfstats`.

    Returns (success, keyspaces, error): a set of names on success, or
    (False, [], stderr) on failure -- note the failure value is a list,
    not a set.
    """
    keyspaces = set([])
    success, return_code, _, stdout, stderr = run_command("nodetool", "cfstats")
    if not success:
        return False, [], stderr
    a = re.compile("^Keyspace: (.*)$")
    for line in stdout.split("\n"):
        m = a.match(line)
        if m is not None:
            keyspaces.add(m.group(1))
    return True, keyspaces, None
def get_ring_tokens():
    """Parse `nodetool ring` output into a list of integer tokens.

    Returns (success, tokens, error).  The first 6 output lines are header;
    token rows have 8 whitespace-separated fields with the token last.
    """
    tokens = []
    success, return_code, _, stdout, stderr = run_command("nodetool", "ring")
    if not success:
        return False, [], stderr
    for line in stdout.split("\n")[6:]:
        segments = line.split()
        if len(segments) == 8:
            tokens.append(int(segments[-1]))
    return True, tokens, None
def get_host_token():
    """Read this node's own token from `nodetool info`.

    Returns (success, token, error).  Expects the output to start with
    'Token' and the token to be the third whitespace-separated field.
    """
    success, return_code, _, stdout, stderr = run_command("nodetool", "info")
    if not success or stdout.find("Token") != 0:
        return False, None, stderr
    return True, int(stdout.split()[2]), None
def get_range_start(token, ring):
    """Return the token immediately before `token` in the ring, wrapping around."""
    position = ring.index(token)
    # A negative index wraps to the end of the list, which matches the
    # original (index - 1 + len) % len arithmetic for position 0.
    return ring[position - 1]
def get_range_termination(token, ring):
    """End of this node's repair range: simply the node's own token.

    `ring` is currently unused -- the commented-out code below this function
    shows an earlier neighbour-based computation that was abandoned.
    """
    return token
# for i in ring:
# if token > i:
# return i
#
# if is_murmur_ring(ring):
# return 2**63 - 1
#
# return 2**127 - 1
def get_sub_range_generator(start, stop, steps=100, format=format_murmur):
    """Yield `steps` (sub_start, sub_end) pairs partitioning the token range
    (start, stop], wrapping around the ring when stop <= start.

    `format` selects the token space: format_md5 -> RandomPartitioner
    (0 .. 2**127-1); anything else -> Murmur3 (-2**63 .. 2**63-1).

    Fixes: range() instead of the Python-2-only xrange(); explicit floor
    division (``//``) so the arithmetic stays integral on Python 3 (Py2 ``/``
    already floor-divided ints); locals renamed to stop shadowing the
    min/max builtins.
    """
    lo = 0 if format == format_md5 else -2**63
    hi = 2**127-1 if format == format_md5 else 2**63-1
    wrap = 2**128 if format == format_md5 else 2**64
    # Total span, accounting for wrap-around when stop <= start.
    count = stop - start if stop > start else hi - start + stop - lo
    step_increment = count // steps
    done = 0
    for step in range(steps):
        if step == steps - 1:
            # The last slice absorbs the integer-division remainder.
            step_increment = count - done
        end = start + step_increment
        if end > hi:
            end -= wrap
        yield start, end
        done += step_increment
        start = end
def repair_range(keyspace, start, end):
    """Run a local, snapshot-based primary-range repair for one token slice.

    Returns (success, command, stdout, stderr); the return code is dropped.
    """
    success, return_code, cmd, stdout, stderr = \
        run_command("nodetool", "repair %s -local -snapshot -pr -st %s -et %s" % (keyspace, start, end))
    return success, cmd, stdout, stderr
# NOTE(review): duplicate of format_murmur defined earlier in this file; this
# redefinition shadows it with identical behavior -- likely a copy-paste leftover.
def format_murmur(i):
    return "%020d" % i
# NOTE(review): duplicate of format_md5 defined earlier in this file; this
# redefinition shadows it with identical behavior -- likely a copy-paste leftover.
def format_md5(i):
    return "%039d" % i
def repair_keyspace(keyspace, steps=100, verbose=True):
    """Repair this node's primary token range for `keyspace` in `steps` slices.

    Returns True only if every slice repaired successfully.
    NOTE: Python 2 print-statement syntax throughout.
    """
    success, ring_tokens, error = get_ring_tokens()
    if not success:
        print "Error fetching ring tokens"
        print error
        return False
    success, host_token, error = get_host_token()
    if not success:
        print "Error fetching host token"
        print error
        return False
    range_start = get_range_start(host_token, ring_tokens)
    range_termination = get_range_termination(host_token, ring_tokens)
    # Token formatting width depends on which partitioner the ring uses.
    formatter = format_murmur if is_murmur_ring(ring_tokens) else format_md5
    if verbose:
        print "%s repair over range (%s, %s] with %s steps for keyspace %s" % (get_time(), formatter(range_start), formatter(range_termination), steps, keyspace)
    for start, end in get_sub_range_generator(range_start, range_termination, steps):
        start = formatter(start)
        end = formatter(end)
        if verbose:
            # Trailing comma: stay on the same line until SUCCESS/FAILED prints.
            print "%s step %04d repairing range (%s, %s] for keyspace %s ... " % (get_time(), steps, start, end, keyspace),
        success, cmd, stdout, stderr = repair_range(keyspace, start, end)
        if not success:
            print "FAILED"
            print cmd
            print stderr
            return False
        if verbose:
            print "SUCCESS"
        # `steps` doubles as the countdown shown in the per-step log line.
        steps -= 1
    return True
def main():
    """CLI entry point: parse options, then repair one keyspace or every
    non-system keyspace on this node.  Exits 0 on success, 2 on failure."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-k", "--keyspace", dest="keyspace",
                      help="keyspace to repair", metavar="KEYSPACE")
    parser.add_option("-s", "--steps", dest="steps", type="int", default=100,
                      help="number of discrete ranges", metavar="STEPS")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default=True,
                      help="don't print status messages to stdout")
    (options, args) = parser.parse_args()
#    if not options.keyspace:
#        parser.print_help()
#        sys.exit(1)
    if not options.keyspace:
        # No keyspace given: repair everything except the system keyspaces.
        sys_keyspaces = set(["system_traces", "system", "solr_admin", "system_auth", "OpsCenter"])
        success, keyspaces, error = get_keyspaces()
        if not success:
            # (sic: "kesypaces" typo is in the original runtime message)
            print "Error fetching kesypaces"
            print error
            sys.exit(2)
        keyspaces = keyspaces.difference(sys_keyspaces)
        for ks in keyspaces:
            if not repair_keyspace(ks, options.steps, options.verbose):
                sys.exit(2)
        sys.exit(0)
    else:
        if repair_keyspace(options.keyspace, options.steps, options.verbose):
            sys.exit(0)
        sys.exit(2)
# Script entry point; the commented lines below are leftover debug probes.
if __name__ == '__main__':
    main()
#    success, ring_tokens, error = get_ring_tokens()
#    success, host_token, error = get_host_token()
#    range_termination = get_range_termination(host_token, ring_tokens)
#    steps = 100
#    print repr(is_murmur_ring(ring_tokens))
#    print repr(get_ring_tokens())
#    print repr(get_host_token())
#    print repr(get_range_termination(host_token, ring_tokens))
#    print repr(get_sub_range_generator(host_token, range_termination, steps).next())
| 4,962 | 0 | 384 |
d1e472cf17de0fbe8a6a5b4389fb7607c0ac78a1 | 225 | py | Python | room/admin.py | iver56/useat-api | f0f241c584cbb05990ee4afb5bb1b2fd6d5fdb44 | [
"MIT"
] | null | null | null | room/admin.py | iver56/useat-api | f0f241c584cbb05990ee4afb5bb1b2fd6d5fdb44 | [
"MIT"
] | null | null | null | room/admin.py | iver56/useat-api | f0f241c584cbb05990ee4afb5bb1b2fd6d5fdb44 | [
"MIT"
] | null | null | null | from django.contrib.gis import admin
from .models import Room
admin.site.register(Room, RoomAdmin)
| 18.75 | 36 | 0.76 | from django.contrib.gis import admin
from .models import Room
class RoomAdmin(admin.OSMGeoAdmin):
    """GeoDjango admin for Room using the OpenStreetMap map widget.

    default_lon/default_lat are in the OSM widget's projected coordinates
    (not plain degrees) -- presumably centering the map on the app's home
    region; confirm the intended location.
    """
    default_lon = 1157108.48900
    default_lat = 9205549.12020
    default_zoom = 12
# Make Room editable in the Django admin with the map widget above.
admin.site.register(Room, RoomAdmin)
| 0 | 100 | 23 |
afb6d94d3d73198348ac427a1fcadd42592842ca | 1,843 | py | Python | src/surface.py | BozeBro/Snake-Game-and-two-player | 13989418ff92d9e5cc0e218b75cd8bd1ed1ca591 | [
"MIT"
] | null | null | null | src/surface.py | BozeBro/Snake-Game-and-two-player | 13989418ff92d9e5cc0e218b75cd8bd1ed1ca591 | [
"MIT"
] | null | null | null | src/surface.py | BozeBro/Snake-Game-and-two-player | 13989418ff92d9e5cc0e218b75cd8bd1ed1ca591 | [
"MIT"
] | null | null | null | import pygame
from colors import *
class Surface:
    """Wraps the pygame display: window creation and per-cell drawing."""
    def __init__(
        self, rows=17, columns=17, blocksize=20, caption="Snake Game", color=WHITE
    ):
        """Remember the grid geometry and window settings.

        rows/columns -- grid dimensions in cells
        blocksize    -- pixel size of one square cell
        caption      -- window title
        color        -- background fill colour
        """
        self.rows = rows
        self.columns = columns
        self.blocksize = blocksize
        self.caption = caption
        self.color = color
    def make_screen(self):
        """Create the pygame window sized to the grid and fill the background.

        Called at game start, not at construction time.
        """
        width = self.rows * self.blocksize
        height = self.columns * self.blocksize
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption(self.caption)
        self.screen.fill(self.color)
    def make_rect(self, x, y, color, **kwargs):
        """Draw one cell-sized rectangle at (x, y) and refresh just that area.

        Used by the apple and snake objects.
        """
        cell = pygame.Rect(x, y, self.blocksize, self.blocksize)
        pygame.draw.rect(self.screen, color, cell, **kwargs)
        pygame.display.update(cell)
if __name__ == "__main__":
    # Test for creation of the screen
    # Manual smoke test: open the window and pump events until the user
    # closes it (pygame.QUIT).
    pygame.init()
    surface = Surface(color=WHITE)
    surface.make_screen()
    pygame.display.flip()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
| 29.725806 | 82 | 0.600651 | import pygame
from colors import *
class Surface:
    """
    Handles functions that interact with the screen including:
    drawing to the screen
    creating the screen
    """
    def __init__(
        self, rows=17, columns=17, blocksize=20, caption="Snake Game", color=WHITE
    ):
        """
        :param:
            (rows=17, columns=17, blocksize=20, caption="Snake Game")
            rows - tells how many rows there will be
            columns - tells how many columns there will be
            blocksize - tells how big a square
            caption - tells what the title in the game window
            color - tells what the color of screen is
        """
        # constants
        self.rows = rows
        self.columns = columns
        self.blocksize = blocksize
        self.caption = caption
        self.color = color
    def make_screen(self):
        """
        Initializes the screen object where the game is played.
        Only used at runtime, or when game plays
        """
        # Window size is grid cells times pixel size per cell.
        self.screen = pygame.display.set_mode(
            (self.rows * self.blocksize, self.columns * self.blocksize)
        )
        pygame.display.set_caption(self.caption)
        self.screen.fill(self.color)
    def make_rect(self, x, y, color, **kwargs):
        """
        Used by apple and snake object.
        Draws a rectangle onto the screen.
        """
        rect = pygame.Rect(x, y, self.blocksize, self.blocksize)
        pygame.draw.rect(self.screen, color, rect, **kwargs)
        # Repaint only the dirty rectangle, not the whole display.
        pygame.display.update(rect)
if __name__ == "__main__":
    # Test for creation of the screen
    # Manual smoke test: open the window and pump events until the user
    # closes it (pygame.QUIT).
    pygame.init()
    surface = Surface(color=WHITE)
    surface.make_screen()
    pygame.display.flip()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
| 0 | 0 | 0 |
a20e5233d64cfa59b698bedc530b40a510125ce8 | 3,311 | py | Python | Course 2: Python Data Structures/Week 1 (Strings)/String-Comparisons.py | kunal5042/Python-for-Everybody | ed702f92c963a467ffb682f171ba0bbb1b571726 | [
"MIT"
] | null | null | null | Course 2: Python Data Structures/Week 1 (Strings)/String-Comparisons.py | kunal5042/Python-for-Everybody | ed702f92c963a467ffb682f171ba0bbb1b571726 | [
"MIT"
] | null | null | null | Course 2: Python Data Structures/Week 1 (Strings)/String-Comparisons.py | kunal5042/Python-for-Everybody | ed702f92c963a467ffb682f171ba0bbb1b571726 | [
"MIT"
] | null | null | null | stringVar1 = "190905042"
stringVar2 = "5042"
stringVar3 = "Kunal"
stringVar4 = "Wadhwa"
stringVar5 = "abc"
stringVar = ["Kunal", "Tanya", "Olivia", "5042", "123"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
stringVar = ["AAA", "BBB", "CCC", "DDD"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
stringVar = ["1234", "5678", "91011", "12131"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar1)
MaximumAndMinimum_Value_Of_Strings(stringVar2)
MaximumAndMinimum_Value_Of_Strings(stringVar3)
WeightOfArrayOfStrings(stringVar4)
MaximumAndMinimum_Value_Of_Strings(stringVar4)
MaximumAndMinimum_Value_Of_Strings(stringVar5)
"""
Output:
Index: 0 String : Kunal Weight : 507
Index: 1 String : Tanya Weight : 509
Index: 2 String : Olivia Weight : 612
Index: 3 String : 5042 Weight : 203
Index: 4 String : 123 Weight : 150
Maximum : Tanya Weight : 509
Minimum : 123 Weight : 150
Index: 0 String : AAA Weight : 195
Index: 1 String : BBB Weight : 198
Index: 2 String : CCC Weight : 201
Index: 3 String : DDD Weight : 204
Maximum : DDD Weight : 204
Minimum : AAA Weight : 195
Index: 0 String : 1234 Weight : 202
Index: 1 String : 5678 Weight : 218
Index: 2 String : 91011 Weight : 252
Index: 3 String : 12131 Weight : 248
Maximum : 91011 Weight : 252
Minimum : 12131 Weight : 248
Maximum : 9 Weight : 57
Minimum : 0 Weight : 48
Maximum : 5 Weight : 53
Minimum : 0 Weight : 48
Maximum : u Weight : 117
Minimum : K Weight : 75
Index: 0 String : W Weight : 87
Index: 1 String : a Weight : 97
Index: 2 String : d Weight : 100
Index: 3 String : h Weight : 104
Index: 4 String : w Weight : 119
Index: 5 String : a Weight : 97
Maximum : w Weight : 119
Minimum : W Weight : 87
Maximum : c Weight : 99
Minimum : a Weight : 97
""" | 30.657407 | 133 | 0.652371 | stringVar1 = "190905042"
# Sample inputs for the demo calls further down (stringVar1 is defined above).
stringVar2 = "5042"
stringVar3 = "Kunal"
stringVar4 = "Wadhwa"
stringVar5 = "abc"
def WeightOfString(stringVar):
    """Sum of the Unicode code points of every character in stringVar."""
    return sum(ord(symbol) for symbol in stringVar)
def WeightOfArrayOfStrings(stringArrayVar):
    """Print one line per string: its index in the list and its weight."""
    for position, text in enumerate(stringArrayVar):
        print("Index: " + str(position) + "\tString : " + text + "\t\tWeight \t: " + str(WeightOfString(text)))
def MaximumAndMinimum_Value_Of_Strings(stringVar):
    """Print the lexicographically largest and smallest elements of stringVar,
    each with its weight, followed by a blank line.

    Also works on a plain string, in which case characters are compared.
    """
    smallest = None
    largest = None
    for text in stringVar:
        if smallest is None or text < smallest:
            smallest = text
        if largest is None or text > largest:
            largest = text
    print("Maximum : " + largest + "\t\tWeight \t: " + (str(WeightOfString(largest))))
    print("Minimum : " + smallest + "\t\tWeight \t: " + (str(WeightOfString(smallest))))
    print()
# Demo runs: print per-string weights and the lexicographic max/min for
# several sample lists, then for individual strings (iterated char by char).
stringVar = ["Kunal", "Tanya", "Olivia", "5042", "123"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
stringVar = ["AAA", "BBB", "CCC", "DDD"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
stringVar = ["1234", "5678", "91011", "12131"]
WeightOfArrayOfStrings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar)
MaximumAndMinimum_Value_Of_Strings(stringVar1)
MaximumAndMinimum_Value_Of_Strings(stringVar2)
MaximumAndMinimum_Value_Of_Strings(stringVar3)
WeightOfArrayOfStrings(stringVar4)
MaximumAndMinimum_Value_Of_Strings(stringVar4)
MaximumAndMinimum_Value_Of_Strings(stringVar5)
"""
Output:
Index: 0 String : Kunal Weight : 507
Index: 1 String : Tanya Weight : 509
Index: 2 String : Olivia Weight : 612
Index: 3 String : 5042 Weight : 203
Index: 4 String : 123 Weight : 150
Maximum : Tanya Weight : 509
Minimum : 123 Weight : 150
Index: 0 String : AAA Weight : 195
Index: 1 String : BBB Weight : 198
Index: 2 String : CCC Weight : 201
Index: 3 String : DDD Weight : 204
Maximum : DDD Weight : 204
Minimum : AAA Weight : 195
Index: 0 String : 1234 Weight : 202
Index: 1 String : 5678 Weight : 218
Index: 2 String : 91011 Weight : 252
Index: 3 String : 12131 Weight : 248
Maximum : 91011 Weight : 252
Minimum : 12131 Weight : 248
Maximum : 9 Weight : 57
Minimum : 0 Weight : 48
Maximum : 5 Weight : 53
Minimum : 0 Weight : 48
Maximum : u Weight : 117
Minimum : K Weight : 75
Index: 0 String : W Weight : 87
Index: 1 String : a Weight : 97
Index: 2 String : d Weight : 100
Index: 3 String : h Weight : 104
Index: 4 String : w Weight : 119
Index: 5 String : a Weight : 97
Maximum : w Weight : 119
Minimum : W Weight : 87
Maximum : c Weight : 99
Minimum : a Weight : 97
""" | 1,127 | 0 | 69 |
1980deaa8c57e0c5df32e9054d7ffab90bfae55b | 83,912 | py | Python | privex/helpers/net/socket.py | Privex/python-helpers | 1c976ce5b0e2c5241ea0bdf330bd6701b5e31153 | [
"X11"
] | 12 | 2019-06-18T11:17:41.000Z | 2021-09-13T23:00:21.000Z | privex/helpers/net/socket.py | Privex/python-helpers | 1c976ce5b0e2c5241ea0bdf330bd6701b5e31153 | [
"X11"
] | 1 | 2019-10-13T07:34:44.000Z | 2019-10-13T07:34:44.000Z | privex/helpers/net/socket.py | Privex/python-helpers | 1c976ce5b0e2c5241ea0bdf330bd6701b5e31153 | [
"X11"
] | 4 | 2019-10-10T10:15:09.000Z | 2021-05-16T01:55:48.000Z | """
Various wrapper functions/classes which use :mod:`socket` or are strongly tied to functions in this file
which use :mod:`socket`. Part of :mod:`privex.helpers.net` - network related helper code.
**Copyright**::
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Originally Developed by Privex Inc. |
| License: X11 / MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| (+) Kale (@kryogenic) [Privex] |
| |
+===================================================+
Copyright 2019 Privex Inc. ( https://www.privex.io )
"""
import asyncio
import functools
import socket
import ssl
import time
from ipaddress import ip_network
from typing import Any, Callable, Generator, IO, Iterable, List, Optional, Tuple, Union
import attr
from privex.helpers import settings
from privex.helpers.common import LayeredContext, byteify, empty, empty_if, is_true, stringify, strip_null
from privex.helpers.thread import SafeLoopThread
from privex.helpers.asyncx import await_if_needed, run_coro_thread
from privex.helpers.net.util import generate_http_request, get_ssl_context, ip_is_v6, ip_sock_ver, is_ip
from privex.helpers.net.dns import resolve_ip, resolve_ip_async
from privex.helpers.types import AUTO, AUTO_DETECTED, AnyNum, STRBYTES, T
import logging
log = logging.getLogger(__name__)
__all__ = [
'AnySocket', 'OpAnySocket', 'SocketContextManager',
'StopLoopOnMatch', 'SocketWrapper', 'AsyncSocketWrapper', 'send_data_async', 'send_data', 'upload_termbin',
'upload_termbin_file', 'upload_termbin_async', 'upload_termbin_file_async'
]
AnySocket = Union[ssl.SSLSocket, "socket.socket"]
OpAnySocket = Optional[Union[ssl.SSLSocket, "socket.socket"]]
@attr.s
class SocketTracker:
"""
Data class used by :class:`.SocketWrapper` / :class:`.AsyncSocketWrapper` for managing sockets
"""
host: str = attr.ib()
port: int = attr.ib(converter=int)
timeout: Union[int, float] = attr.ib(factory=lambda: settings.DEFAULT_SOCKET_TIMEOUT)
server: bool = attr.ib(default=False, converter=is_true)
connected: bool = attr.ib(default=False, converter=is_true)
binded: bool = attr.ib(default=False, converter=is_true)
listening: bool = attr.ib(default=False, converter=is_true)
use_ssl: bool = attr.ib(default=False, converter=is_true)
socket_conf: dict = attr.ib(factory=dict)
ssl_conf: dict = attr.ib(factory=dict)
ssl_wrap_conf: dict = attr.ib(factory=dict)
hostname: str = attr.ib(default=None)
_ssl_context: ssl.SSLContext = attr.ib(default=None)
_ssl_socket: ssl.SSLSocket = attr.ib(default=None)
_loop: asyncio.AbstractEventLoop = attr.ib(default=None)
_socket: AnySocket = attr.ib(default=None)
_socket_layer_ctx = attr.ib(default=None)
_host_v4: Optional[str] = attr.ib(default=None)
_host_v6: Optional[str] = attr.ib(default=None)
_host_v4_resolved: bool = attr.ib(default=False)
_host_v6_resolved: bool = attr.ib(default=False)
@property
@family.setter
@property
@property
@property
@socket.setter
@property
@socket_layer_ctx.setter
@property
@ssl_context.setter
@property
@ssl_socket.setter
@property
@property
@property
@property
connected_ip = ip_address
@property
@classmethod
class SocketWrapper(object):
"""
A wrapper class to make working with :class:`socket.socket` much simpler.
.. NOTE:: For AsyncIO, use :class:`.AsyncSocketWrapper` instead.
**Features**
* Automatic address family detection - detects whether you have working IPv4 / IPv6, and decides the best way
to connect to a host, depending on what IP versions that host supports
* ``Happy Eyeballs`` for IPv6. If something goes wrong with an IPv6 connection, it will fallback to IPv4 if the
host has it available (i.e. a domain with both ``A`` and ``AAAA`` records)
* Easy to use SSL, which works with HTTPS and other SSL-secured protocols. Just pass ``use_ssl=True`` in the constructor.
* Many wrapper methods such as :meth:`.recv_eof`, :meth:`.query`, and :meth:`.http_request` to make working
with sockets much easier.
**Examples**
Send a string of bytes / text to a server, and then read until EOF::
>>> sw = SocketWrapper('icanhazip.org', 80)
>>> res = sw.query("GET / HTTP/1.1\\nHost: icanhazip.com\\n\\n")
>>> print(res)
HTTP/1.1 200 OK
Server: nginx
Content-Type: text/plain; charset=UTF-8
Content-Length: 17
x-rtfm: Learn about this site at http://bit.ly/icanhazip-faq and do not abuse the service.
2a07:e00::abc
For basic HTTP requests, you can use :meth:`.http_request`, which will automatically send ``Host`` (based on the host you passed),
and ``User-Agent``. SSL works too, just set ``use_ssl=True``::
>>> sw = SocketWrapper('myip.privex.io', 443, use_ssl=True)
>>> res = sw.http_request('/?format=json')
>>> print(res)
HTTP/1.1 200 OK
Server: nginx
Date: Tue, 22 Sep 2020 03:40:48 GMT
Content-Type: application/json
Content-Length: 301
Connection: close
Access-Control-Allow-Origin: *
{"error":false,"geo":{"as_name":"Privex Inc.","as_number":210083,"city":"Stockholm","country":"Sweden",
"country_code":"SE","error":false,"zip":"173 11"},"ip":"2a07:e00::abc","ip_type":"ipv6","ip_valid":true,
"messages":[], "ua":"Python Privex Helpers ( https://github.com/Privex/python-helpers )"}
Standard low-level sending and receiving data::
>>> sw = SocketWrapper('127.0.0.1', 8888)
>>> sw.sendall(b"hello world") # Send the text 'hello world'
>>> sw.recv(64) # read up to 64 bytes of data from the socket
b"lorem ipsum\n"
"""
DEFAULT_TIMEOUT = empty_if(socket.getdefaulttimeout(), settings.DEFAULT_SOCKET_TIMEOUT, zero=True)
_context: Optional[ssl.SSLContext]
_socket: OpAnySocket
_base_socket: Optional[socket.socket]
_ssl_socket: Optional[ssl.SSLSocket]
_layer_context: Optional[LayeredContext]
_socket_ctx_mgr: SocketContextManager
# connected: bool
auto_connect: bool
auto_listen: bool
listen_backlog: int
tracker: SocketTracker
@property
@ssl_conf.setter
@property
@ssl_wrap_conf.setter
@property
@socket_conf.setter
@property
@property
@timeout.setter
@property
@hostname.setter
@property
ssl_context = context
@property
@base_socket.setter
@property
@socket.setter
@property
# @connected.setter
# def connected(self, value):
# self.tracker.connected = value
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
# @_sockwrapper_auto_connect()
# def query(self, data: Union[str, bytes], bufsize: int = 32, eof_timeout=30, **kwargs):
# timeout_fail, send_flags = kwargs.get('timeout_fail'), kwargs.get('send_flags', kwargs.get('flags', None))
# recv_flags = kwargs.get('recv_flags', kwargs.get('flags', None))
# log.debug(" >> Sending %s bytes to %s:%s", len(data), self.host, self.port)
# self.sendall(byteify(data), flags=send_flags)
# log.debug(" >> Reading %s bytes per chunk from %s:%s", bufsize, self.host, self.port)
# return self.read_eof(bufsize, eof_timeout=eof_timeout, flags=recv_flags, timeout_fail=timeout_fail)
# @_sockwrapper_auto_connect()
# def http_request(
# self, url="/", host=AUTO_DETECTED, method="GET", user_agent=DEFAULT_USER_AGENT, extra_data: Union[STRBYTES, List[str]] = None,
# body: STRBYTES = None, eof_timeout=30, **kwargs
# ) -> Union[bytes, Awaitable[bytes]]:
# bufsize, flags, timeout_fail = kwargs.pop('bufsize', 256), kwargs.pop('flags', None), kwargs.pop('timeout_fail', False)
# data = self._http_request(url, host=host, method=method, user_agent=user_agent, extra=extra_data, body=body, **kwargs)
# self.sendall(data, flags=flags)
# return self.read_eof(bufsize, eof_timeout=eof_timeout, flags=flags, timeout_fail=timeout_fail)
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
@classmethod
class AsyncSocketWrapper(SocketWrapper):
"""
>>> from privex.helpers import AsyncSocketWrapper
>>> sw = AsyncSocketWrapper('termbin.com', 9999)
>>> url = await sw.query("HELLO world\\n\\nThis is a test\\nusing async sockets\\n\\nwith Python")
'https://termbin.com/lsd93'
>>> url = await sw.read_eof()
"""
_loop: Optional[asyncio.AbstractEventLoop]
DEFAULT_TIMEOUT = empty_if(socket.getdefaulttimeout(), settings.DEFAULT_SOCKET_TIMEOUT, zero=True)
@property
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_async_sockwrapper_auto_connect()
@_sockwrapper_auto_connect()
async def send_data_async(
        host: str, port: int, data: Union[bytes, str, Iterable], timeout: AnyNum = None, **kwargs
) -> Optional[Union[str, bytes]]:
    """
    AsyncIO version of :func:`.send_data` - connect to ``host:port`` over TCP, transmit ``data``,
    then read and return the server's response until EOF.

        >>> await send_data_async('termbin.com', 9999, "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
        'https://termbin.com/oi07'

    :param str host: The hostname or IPv4/v6 address to connect to
    :param int port: The port number to connect to on ``host``
    :param bytes|str|iter data: The data to send. May be an iterator/generator to send in chunks,
         or ``None`` to skip sending and only read the server's output.
    :param float|int timeout: Socket timeout. If not passed, uses :func:`socket.getdefaulttimeout`,
         falling back to ``15`` when the global default is ``None``.
    :param kwargs: Supports the same keyword arguments as :func:`.send_data` -
         ``chunk`` / ``chunk_size``, ``string_result``, ``strip_result``, ``fail``, ``ip_version``
    :return: The server's response as :class:`str` (or :class:`bytes` when ``string_result=False``),
         or ``None`` on connection failure when ``fail=False``.
    """
    fhost = f"({host}):{port}"
    chunk_size = int(kwargs.get('chunk', kwargs.get('chunk_size', 64)))
    string_result = is_true(kwargs.get('string_result', True))
    strip_result = is_true(kwargs.get('strip_result', True))
    fail = is_true(kwargs.get('fail', True))
    ip_version = kwargs.get('ip_version', 'any')
    timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 15, zero=True), zero=True)

    is_iter, data_iter = False, None
    if data is not None:
        if isinstance(data, (str, bytes, int, float)):
            data = byteify(data)
        else:
            # Not a plain scalar - detect iterables (chunked sending); anything else is coerced to bytes.
            try:
                data_iter = iter(data)
                is_iter = True
            except TypeError:
                # noinspection PyTypeChecker
                data = byteify(data)
    loop = asyncio.get_event_loop()
    try:
        # Resolve 'host' first, so we can pick the matching address family (AF_INET / AF_INET6).
        s_ver = socket.AF_INET
        ip = await resolve_ip_async(host, ip_version)
        if ip_is_v6(ip): s_ver = socket.AF_INET6
        fhost += f" (IP: {ip})"
        with socket.socket(s_ver, socket.SOCK_STREAM) as s:
            s.settimeout(float(timeout))
            log.debug(" [...] Connecting to host: %s", fhost)
            await loop.sock_connect(s, (ip, port))
            log.debug(" [+++] Connected to %s\n", fhost)
            if data is None:
                log.debug(" [!!!] 'data' is None. Not transmitting any data to the host.")
            elif is_iter:
                i = 1
                for c in data_iter:
                    log.debug(" [...] Sending %s byte chunk (%s)\n", len(c), i)
                    await loop.sock_sendall(s, c)
                    # FIX: the chunk counter was previously never incremented, so every chunk logged as (1)
                    i += 1
            else:
                # We use 'sock_sendall' to reliably send the entire contents of 'data' to the service we're connected to.
                log.debug(" [...] Sending %s bytes to %s ...\n", len(data), fhost)
                await loop.sock_sendall(s, data)
            log.debug(" >> Reading response ...")
            res = b''
            i = 1
            while True:
                chunk = await loop.sock_recv(s, chunk_size)
                if not chunk: break
                res += chunk
                log.debug(" [...] Read %s byte chunk (%s)\n", len(chunk), i)
                i += 1
            if string_result:
                res = stringify(res)
                if strip_result: res = res.strip("\x00").strip().strip("\x00").strip()
            log.debug(" [+++] Got result ( %s bytes ) \n", len(res))
    except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
        if fail:
            raise e
        log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
        return None
    return res
def send_data(
    host: str, port: int, data: Optional[Union[bytes, str, Iterable]] = None, timeout: Union[int, float] = None, **kwargs
) -> Optional[Union[str, bytes]]:
    """
    Connect to ``host:port`` over TCP, transmit ``data`` (if any), then read and return the response until EOF.

        >>> from privex.helpers import send_data
        >>> send_data('termbin.com', 9999, "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
        'https://termbin.com/oi07'

    :param str host: The hostname or IPv4/v6 address to connect to
    :param port: The port number to connect to on ``host``
    :param bytes|str|iter data: The data to send to ``host:port`` via a TCP socket. Generally :class:`bytes` / :class:`str`.
         Can be an iterator/generator to send data in chunks. Can be ``None`` to disable sending data, instead
         only receiving and returning data.
    :param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
         If the global default timeout is ``None``, then falls back to ``15``
    :param kwargs:
    :keyword int chunk: (Default: ``64``) Maximum number of bytes to read into buffer per socket receive call.
    :keyword bool string_result: (Default: ``True``) If ``True``, the response sent by the server will be casted into a :class:`str`
        before returning it.
    :keyword bool strip_result: (Default: ``True``) This argument only works if ``string_result`` is also True.
        If both ``string_result`` and ``strip_result`` are ``True``, the response sent by the server will
        have whitespace, newlines, and null bytes trimmed from the start and end after it's casted into a string.
    :keyword bool fail: (Default: ``True``) If ``True``, will raise exceptions when connection errors occur. When ``False``, will simply
        return ``None`` if there are connection exceptions raised during this function's execution.
    :keyword str|int ip_version: (Default: ``any``) Restrict address resolution to a specific IP version (``v4`` / ``v6`` / ``any``).
    :return: The server's response (:class:`str` or :class:`bytes`), or ``None`` on failure with ``fail=False``.
    """
    fhost = f"({host}):{port}"
    chunk_size = int(kwargs.get('chunk', kwargs.get('chunk_size', 64)))
    string_result = is_true(kwargs.get('string_result', True))
    strip_result = is_true(kwargs.get('strip_result', True))
    fail = is_true(kwargs.get('fail', True))
    ip_version = kwargs.get('ip_version', 'any')
    timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 15, zero=True), zero=True)

    is_iter, data_iter, is_v6, v4_address, host_is_ip = False, None, False, None, False
    if data is not None:
        if isinstance(data, (str, bytes, int, float)):
            data = byteify(data)
        else:
            # Not a plain scalar - detect iterables (chunked sending); anything else is coerced to bytes.
            try:
                data_iter = iter(data)
                is_iter = True
            except TypeError:
                # noinspection PyTypeChecker
                data = byteify(data)
    # Detect whether 'host' is already a literal IP - if so, there's no point resolving a backup IPv4 address.
    try:
        ip_network(host)
        host_is_ip = True
    except (TypeError, ValueError):
        host_is_ip = False
    try:
        # First we resolve the IP address of 'host', so we can detect whether we're connecting to an IPv4 or IPv6 host,
        # letting us adjust the AF_INET variable accordingly.
        s_ver = socket.AF_INET
        ip = resolve_ip(host, ip_version)
        if ip_is_v6(ip):
            s_ver, is_v6 = socket.AF_INET6, True
            if not host_is_ip:
                # Resolve an IPv4 address up-front as a fallback, in case the IPv6 connection fails.
                try:
                    v4_address = resolve_ip(host, 'v4')
                except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror, AttributeError) as e:
                    # FIX: the format string has three placeholders, but 'host' was previously missing from the arguments,
                    # which caused a logging formatting error instead of the intended warning.
                    log.warning(
                        "Warning: failed to resolve IPv4 address for %s (to be used as a backup if IPv6 is broken). Reason: %s %s ",
                        host, type(e), str(e)
                    )
        fhost += f" (IP: {ip})"
    except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
        if fail:
            raise e
        log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
        return None
    try:
        with socket.socket(s_ver, socket.SOCK_STREAM) as s:
            # Once we have our socket object, we set the timeout (by default it could hang forever), and open the connection.
            s.settimeout(timeout)
            log.debug(" [...] Connecting to host: %s", fhost)
            s.connect((ip, port))
            log.debug(" [+++] Connected to %s\n", fhost)
            if data is None:
                log.debug(" [!!!] 'data' is None. Not transmitting any data to the host.")
            elif is_iter:
                i = 1
                for c in data_iter:
                    log.debug(" [...] Sending %s byte chunk (%s)\n", len(c), i)
                    # FIX: previously called 's.sock_sendall(c)' - socket objects have no such method
                    # (that's the asyncio loop API), so chunked sending raised AttributeError.
                    s.sendall(c)
                    i += 1
            else:
                # We use 'sendall' to reliably send the entire contents of 'data' to the service we're connected to.
                log.debug(" [...] Sending %s bytes to %s ...\n", len(data), fhost)
                s.sendall(data)
            # Once we've sent 'data', read the server's response in chunks until EOF.
            log.debug(" >> Reading response ...")
            res = b''
            i = 1
            while True:
                chunk = s.recv(chunk_size)
                if not chunk: break
                res += chunk
                log.debug(" [...] Read %s byte chunk (%s)\n", len(chunk), i)
                i += 1
            if string_result:
                res = stringify(res)
                if strip_result: res = res.strip("\x00").strip().strip("\x00").strip()
            log.debug(" [+++] Got result ( %s bytes ) \n", len(res))
    except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
        log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
        if is_v6 and not empty(v4_address):
            log.warning(
                "Retrying connection to %s over IPv4 instead of IPv6. || IPv6 address: %s || IPv4 address: %s ",
                fhost, ip, v4_address
            )
            # FIX: force IPv4 on the retry. Previously the retry reused the caller's kwargs unchanged, so
            # resolution could select the same broken IPv6 address again - recursing indefinitely.
            # NOTE(review): if 'data' was a partially-consumed iterator, only the remaining chunks are re-sent.
            retry_kwargs = {**kwargs, 'ip_version': 'v4'}
            return send_data(host, port, data, timeout=timeout, **retry_kwargs)
        if fail:
            raise e
        return None
    return res
def upload_termbin(data: Union[bytes, str], timeout: Union[int, float] = None, **kwargs) -> str:
    """
    Paste ``data`` onto the `TermBin`_ pastebin service and return the resulting paste URL.

    The destination host/port come from :attr:`privex.helpers.settings.TERMBIN_HOST`
    and :attr:`privex.helpers.settings.TERMBIN_PORT`.

    An AsyncIO equivalent is available as :func:`.upload_termbin_async`.

    .. _TermBin: https://termbin.com

        >>> upload_termbin("hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
        'https://termbin.com/kerjk'

    :param bytes|str data: The content to paste - either :class:`str` or :class:`bytes`
    :param float|int timeout: Socket timeout. Defaults to :func:`socket.getdefaulttimeout`, or ``15``
        when no global default is set.
    :return str url: A raw viewing/download link to the new paste.
    """
    payload = byteify(data)
    log.info(" [...] Uploading %s bytes to termbin ...\n", len(payload))
    paste_url = send_data(settings.TERMBIN_HOST, settings.TERMBIN_PORT, payload, timeout=timeout, **kwargs)
    log.info(" [+++] Got termbin link: %s \n", paste_url)
    return paste_url
def upload_termbin_file(filename: str, timeout: int = 15, **kwargs) -> str:
    """
    Read the file at ``filename`` and paste its contents onto `TermBin`_ , returning the paste URL.

    An AsyncIO equivalent is available as :func:`.upload_termbin_file_async`. If your data is
    already in a variable, use :func:`.upload_termbin` directly instead.

    .. _TermBin: https://termbin.com

    :param str filename: Relative or absolute path of the file to upload.
    :param float|int timeout: Socket timeout. Defaults to :func:`socket.getdefaulttimeout`, or ``15``
        when no global default is set.
    :return str url: A raw viewing/download link to the new paste.
    """
    log.info(" >> Uploading file '%s' to termbin", filename)
    with open(filename, 'rb') as fh:
        log.debug(" [...] Opened file %s - reading contents into RAM...", filename)
        contents = fh.read()
    log.debug(" [+++] Loaded file into RAM. Total size: %s bytes", len(contents))
    paste_url = upload_termbin(contents, timeout=timeout, **kwargs)
    log.info(" [+++] Uploaded file %s to termbin. Got termbin link: %s \n", filename, paste_url)
    return paste_url
async def upload_termbin_async(data: Union[bytes, str], timeout: Union[int, float] = None, **kwargs) -> str:
    """
    Upload the :class:`bytes` / :class:`string` ``data`` to the pastebin service `TermBin`_ ,
    using the hostname and port defined in :attr:`privex.helpers.settings.TERMBIN_HOST`
    and :attr:`privex.helpers.settings.TERMBIN_PORT`

    NOTE - A synchronous (non-async) version of this function is available: :func:`.upload_termbin`

    Returns the `TermBin`_ URL as a string - which is a raw download / viewing link for the paste.

    .. _TermBin: https://termbin.com

        >>> my_data = "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n"
        >>> await upload_termbin_async(my_data)
        'https://termbin.com/kerjk'

    :param bytes|str data: The data to upload to `TermBin`_ - as either :class:`str` or :class:`bytes`
    :param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
         If the global default timeout is ``None``, then falls back to ``15``
    :param kwargs: Extra keyword arguments forwarded to :func:`.send_data_async`
         (e.g. ``chunk``, ``string_result``, ``strip_result``, ``fail``, ``ip_version``).
         Added for consistency with :func:`.upload_termbin`, which already forwards ``**kwargs``.
    :return str url: The `TermBin`_ URL to your paste as a string - which is a raw download / viewing link for the paste.
    """
    data = byteify(data)
    log.info(" [...] Uploading %s bytes to termbin ...\n", len(data))
    # Forward **kwargs so the async variant supports the same tuning options as the sync send_data() path.
    res = await send_data_async(settings.TERMBIN_HOST, settings.TERMBIN_PORT, data, timeout=timeout, **kwargs)
    log.info(" [+++] Got termbin link: %s \n", res)
    return res
async def upload_termbin_file_async(filename: str, timeout: int = 15, **kwargs) -> str:
    """
    Uploads the file ``filename`` to `TermBin`_ and returns the paste URL as a string.

    .. NOTE:: A synchronous (non-async) version of this function is available: :func:`.upload_termbin_file`

    .. NOTE:: If the data you want to upload is already loaded into a variable - you can use :func:`.upload_termbin_async` instead,
        which accepts your data directly - through a :class:`str` or :class:`bytes` parameter

    .. _TermBin: https://termbin.com

    :param str filename: The path (absolute or relative) to the file you want to upload to `TermBin`_ - as a :class:`str`
    :param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
         If the global default timeout is ``None``, then falls back to ``15``
    :param kwargs: Extra keyword arguments forwarded to :func:`.upload_termbin_async` / :func:`.send_data_async` -
         added for consistency with :func:`.upload_termbin_file`, which already forwards ``**kwargs``.
    :return str url: The `TermBin`_ URL to your paste as a string - which is a raw download / viewing link for the paste.
    """
    log.info(" >> Uploading file '%s' to termbin", filename)
    # NOTE(review): the file is read synchronously (blocking) inside this coroutine - acceptable for
    # small files, but large files will block the event loop while being loaded.
    with open(filename, 'rb') as fh:
        log.debug(" [...] Opened file %s - reading contents into RAM...", filename)
        data = fh.read()
    log.debug(" [+++] Loaded file into RAM. Total size: %s bytes", len(data))
    res = await upload_termbin_async(data, timeout=timeout, **kwargs)
    log.info(" [+++] Uploaded file %s to termbin. Got termbin link: %s \n", filename, res)
    return res
| 46.695604 | 140 | 0.603859 | """
Various wrapper functions/classes which use :mod:`socket` or are strongly tied to functions in this file
which use :mod:`socket`. Part of :mod:`privex.helpers.net` - network related helper code.
**Copyright**::
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Originally Developed by Privex Inc. |
| License: X11 / MIT |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| (+) Kale (@kryogenic) [Privex] |
| |
+===================================================+
Copyright 2019 Privex Inc. ( https://www.privex.io )
"""
import asyncio
import functools
import socket
import ssl
import time
from ipaddress import ip_network
from typing import Any, Callable, Generator, IO, Iterable, List, Optional, Tuple, Union
import attr
from privex.helpers import settings
from privex.helpers.common import LayeredContext, byteify, empty, empty_if, is_true, stringify, strip_null
from privex.helpers.thread import SafeLoopThread
from privex.helpers.asyncx import await_if_needed, run_coro_thread
from privex.helpers.net.util import generate_http_request, get_ssl_context, ip_is_v6, ip_sock_ver, is_ip
from privex.helpers.net.dns import resolve_ip, resolve_ip_async
from privex.helpers.types import AUTO, AUTO_DETECTED, AnyNum, STRBYTES, T
import logging
log = logging.getLogger(__name__)
__all__ = [
'AnySocket', 'OpAnySocket', 'SocketContextManager',
'StopLoopOnMatch', 'SocketWrapper', 'AsyncSocketWrapper', 'send_data_async', 'send_data', 'upload_termbin',
'upload_termbin_file', 'upload_termbin_async', 'upload_termbin_file_async'
]
# Type alias: any usable socket object - either a plain TCP socket or an SSL-wrapped one.
AnySocket = Union[ssl.SSLSocket, "socket.socket"]
# Optional variant of AnySocket - may be None when no socket has been created yet.
OpAnySocket = Optional[Union[ssl.SSLSocket, "socket.socket"]]
class SocketContextManager:
    """
    Small context manager helper used by :class:`.SocketWrapper` / :class:`.AsyncSocketWrapper`.

    On entry it (re)connects the wrapper it was constructed with and returns that wrapper; on exit
    it closes the wrapper's connection. Both ``with`` and ``async with`` are supported.
    """
    parent_class: Union["SocketWrapper", "AsyncSocketWrapper"]

    def __init__(self, parent_class: Union["SocketWrapper", "AsyncSocketWrapper"]):
        # Hold a reference to the wrapper so enter/exit can drive its connection lifecycle.
        self.parent_class = parent_class

    def __enter__(self) -> "SocketWrapper":
        log.debug("Entering SocketContextManager")
        wrapper = self.parent_class
        wrapper.reconnect()
        return wrapper

    def __exit__(self, exc_type, exc_val, exc_tb):
        log.debug("Exiting SocketContextManager")
        self.parent_class.close()

    async def __aenter__(self) -> "AsyncSocketWrapper":
        log.debug("[async] Entering SocketContextManager")
        wrapper = self.parent_class
        await wrapper.reconnect()
        return wrapper

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        log.debug("[async] Exiting SocketContextManager")
        # NOTE: close() is invoked synchronously here, mirroring the non-async __exit__.
        self.parent_class.close()
class StopLoopOnMatch(Exception):
    """
    Control-flow exception carrying match details - presumably raised to break out of a socket
    read loop once received data satisfies a match condition (TODO confirm against callers).

    :param str message: Human readable description of why the loop was stopped.
    :param match: The value that was matched against.
    :param str compare: Name of the comparison method/mode used.
    :param bool compare_lower: Whether comparison was done case-insensitively (lower-cased).
    :param extra: Any additional keyword data, stored as-is on :attr:`.extra`.
    """
    def __init__(self, message: str, match: Any = None, compare: str = None, compare_lower: bool = True, **extra):
        super().__init__(message)
        self.message, self.match = message, match
        self.compare, self.compare_lower = compare, compare_lower
        self.extra = extra
def _sockwrapper_auto_connect(new_sock: bool = False):
    """
    Method decorator for :class:`.SocketWrapper` which ensures the instance's socket is connected
    before the wrapped method runs, and transparently reconnects + retries (up to 3 attempts total)
    when the socket breaks mid-call with a pipe/reset/abort error.

    :param bool new_sock: When ``True``, each call gets its own temporary socket - a duplicated
        :class:`.SocketTracker` - passed to the wrapped method via the ``sock`` kwarg, instead of
        the instance's shared socket.
    """
    def _decorator(f):
        @functools.wraps(f)
        def wrapper(self: Union["SocketWrapper"], *args, _sock_tries=0, **kwargs):
            kwargs = dict(kwargs)
            gensock = None
            # Either duplicate the instance's tracker for a one-off socket (new_sock=True for this
            # decorator or the call), or honour a caller-supplied 'sock' kwarg.
            if kwargs.pop('new_sock', new_sock):
                log.debug("new_sock is true for call to function %s - generating socket to kwarg 'sock'...", f.__name__)
                gensock = SocketTracker.duplicate(self.tracker)
            elif 'sock' in kwargs and kwargs['sock'] not in [None, False, '']:
                gensock = kwargs.pop('sock')
            if gensock not in [None, False, '']:
                log.debug("'sock' is present for call to function %s...", f.__name__)
                # Use the tracker as a context manager so the temporary socket is connected around
                # the wrapped call and cleaned up afterwards.
                with gensock as sck:
                    log.debug('ensuring socket is open (inside with). now connecting socket.')
                    try:
                        kwargs['sock'] = sck
                    except OSError as e:
                        if 'already connected' in str(e):
                            log.debug('socket already connected. continuing.')
                    log.debug('socket should now be connected. calling function %s', f.__name__)
                    return f(self, *args, **kwargs)
            if not self.connected:
                log.debug('instance socket is not connected ( calling function %s )', f.__name__)
                if not self.auto_connect:
                    raise ConnectionError(
                        "Would've auto-connected SocketWrapper, but self.auto_connect is False. Please call connect before "
                        "interacting with the socket."
                    )
                if any([empty(self.host, zero=True), empty(self.port, zero=True)]):
                    raise ConnectionError("Tried to auto-connect SocketWrapper, but self.host and/or self.port are empty!")
                log.debug('connecting instance socket ( calling function %s )', f.__name__)
                self.tracker.connect()
            try:
                # _sock_tries bounds the recursion when the wrapped call keeps failing.
                _sock_tries += 1
                return f(self, *args, **kwargs)
            except (BrokenPipeError, ConnectionResetError, ConnectionAbortedError) as e:
                # The socket died mid-call: reset the connection and re-invoke the wrapper (bounded retry).
                if self.error_reconnect and _sock_tries < 3:
                    log.error("The socket appears to have broken. Resetting and trying again. Error was: %s - %s", type(e), str(e))
                    self.tracker.reconnect()
                    return wrapper(self, *args, _sock_tries=_sock_tries, **kwargs)
                raise e
        return wrapper
    return _decorator
def _async_sockwrapper_auto_connect():
    """
    AsyncIO counterpart of :func:`._sockwrapper_auto_connect` for :class:`.AsyncSocketWrapper`
    methods: auto-connects the tracker before the wrapped coroutine runs, and reconnects + retries
    (up to 3 attempts total) when the socket breaks mid-call.
    """
    def _decorator(f):
        @functools.wraps(f)
        async def wrapper(self: Union["AsyncSocketWrapper"], *args, _sock_tries=0, **kwargs):
            if not self.tracker.connected:
                if not self.auto_connect:
                    raise ConnectionError(
                        "Would've auto-connected AsyncSocketWrapper, but self.auto_connect is False. Please call connect before "
                        "interacting with the socket."
                    )
                if any([empty(self.host, zero=True), empty(self.port, zero=True)]):
                    raise ConnectionError("Tried to auto-connect AsyncSocketWrapper, but self.host and/or self.port are empty!")
                await self.tracker.connect_async()
            try:
                # _sock_tries bounds the recursion when the wrapped call keeps failing.
                _sock_tries += 1
                return await f(self, *args, **kwargs)
            except (BrokenPipeError, ConnectionResetError, ConnectionAbortedError) as e:
                log.error("The socket appears to have broken. Error was: %s - %s", type(e), str(e))
                # The socket died mid-call: reset the connection and re-invoke the wrapper (bounded retry).
                if self.error_reconnect and _sock_tries < 3:
                    log.error("Resetting the connection and trying again...")
                    await self.tracker.reconnect_async()
                    return await wrapper(self, *args, _sock_tries=_sock_tries, **kwargs)
                raise e
        return wrapper
    return _decorator
class MockContext:
    """
    A do-nothing stand-in context manager (sync + async). Entry returns the literal string
    ``"yes"`` and exit performs no cleanup - used by :class:`.SocketTracker` as the inner target
    for its :class:`.LayeredContext` layer counting.
    """

    def __enter__(self):
        return "yes"

    def __exit__(self, exc_type, exc_val, exc_tb):
        return None

    async def __aenter__(self):
        return "yes"

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        return None
@attr.s
class SocketTracker:
    """
    Data class used by :class:`.SocketWrapper` / :class:`.AsyncSocketWrapper` for managing sockets.

    Tracks connection state (``connected`` / ``binded`` / ``listening``), lazily constructs the
    underlying :class:`socket.socket` (and :class:`ssl.SSLSocket` when ``use_ssl``), resolves and
    caches the host's IPv4/IPv6 addresses, and implements sync + async connect/reconnect with an
    IPv6 -> IPv4 fallback. Also usable as a (nestable) sync or async context manager via
    :class:`.LayeredContext`.
    """
    # -- attrs fields (constructor arguments) --
    host: str = attr.ib()                        # hostname or IP to connect/bind to
    port: int = attr.ib(converter=int)
    timeout: Union[int, float] = attr.ib(factory=lambda: settings.DEFAULT_SOCKET_TIMEOUT)
    server: bool = attr.ib(default=False, converter=is_true)        # True when acting as a listening server
    connected: bool = attr.ib(default=False, converter=is_true)
    binded: bool = attr.ib(default=False, converter=is_true)
    listening: bool = attr.ib(default=False, converter=is_true)
    use_ssl: bool = attr.ib(default=False, converter=is_true)
    socket_conf: dict = attr.ib(factory=dict)    # kwargs for socket.socket()
    ssl_conf: dict = attr.ib(factory=dict)       # kwargs for get_ssl_context()
    ssl_wrap_conf: dict = attr.ib(factory=dict)  # kwargs for SSLContext.wrap_socket()
    hostname: str = attr.ib(default=None)        # defaults to 'host' (see __attrs_post_init__)
    # -- internal lazily-built state --
    _ssl_context: ssl.SSLContext = attr.ib(default=None)
    _ssl_socket: ssl.SSLSocket = attr.ib(default=None)
    _loop: asyncio.AbstractEventLoop = attr.ib(default=None)
    _socket: AnySocket = attr.ib(default=None)
    _socket_layer_ctx = attr.ib(default=None)
    # cached DNS results for v6_fallback()
    _host_v4: Optional[str] = attr.ib(default=None)
    _host_v6: Optional[str] = attr.ib(default=None)
    _host_v4_resolved: bool = attr.ib(default=False)
    _host_v6_resolved: bool = attr.ib(default=False)

    def __attrs_post_init__(self):
        # Default the SNI/verification hostname to the connection host when not explicitly set.
        self.hostname = empty_if(self.hostname, self.host, zero=True)

    @property
    def family(self) -> int:
        # Address family for socket construction; -1 means "unspecified" (socket default).
        return self.socket_conf.get('family', -1)

    @family.setter
    def family(self, value: int):
        self.socket_conf['family'] = value

    @property
    def host_v4(self) -> Optional[str]:
        # Resolve + cache the host's IPv4 address on first access.
        if not self._host_v4_resolved:
            self._host_v4 = resolve_ip(self.host, 'v4')
            self._host_v4_resolved = True
        return self._host_v4

    @property
    def host_v6(self) -> Optional[str]:
        # Resolve + cache the host's IPv6 address on first access.
        if not self._host_v6_resolved:
            self._host_v6 = resolve_ip(self.host, 'v6')
            self._host_v6_resolved = True
        return self._host_v6

    @property
    def socket(self):
        # Lazily construct the plain socket from socket_conf.
        if not self._socket:
            self._socket = socket.socket(**self.socket_conf)
        return self._socket

    @socket.setter
    def socket(self, value):
        # NOTE(review): assignment is intentionally ignored - the tracker owns its socket instance.
        pass

    @property
    def socket_layer_ctx(self):
        # LayeredContext counts how many nested with/async-with layers are active (see __enter__/__exit__).
        if not self._socket_layer_ctx:
            self._socket_layer_ctx = LayeredContext(MockContext())
        return self._socket_layer_ctx

    @socket_layer_ctx.setter
    def socket_layer_ctx(self, value):
        self._socket_layer_ctx = value

    def _make_context(self, **kwargs) -> ssl.SSLContext:
        # Build an SSLContext from ssl_conf, with per-call overrides via kwargs.
        cnf = {**self.ssl_conf, **kwargs}
        return get_ssl_context(**cnf)

    @property
    def ssl_context(self):
        if not self._ssl_context:
            self._ssl_context = self._make_context()
        return self._ssl_context

    @ssl_context.setter
    def ssl_context(self, value):
        self._ssl_context = value

    @property
    def ssl_socket(self):
        # Lazily wrap the plain socket in SSL using the (lazily-built) SSL context.
        if not self._ssl_socket:
            self._ssl_socket = self.ssl_context.wrap_socket(self.socket, **self.ssl_wrap_conf)
        return self._ssl_socket

    @ssl_socket.setter
    def ssl_socket(self, value):
        self._ssl_socket = value

    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        if not self._loop:
            self._loop = asyncio.get_event_loop()
        return self._loop

    @property
    def _auto_socket(self):
        # The "active" socket: SSL-wrapped when use_ssl, plain otherwise. Does NOT auto-connect.
        return self.ssl_socket if self.use_ssl else self.socket

    @property
    def auto_socket(self) -> AnySocket:
        # Like _auto_socket, but ensures the socket is connected first.
        if not self.connected: self.connect()
        return self._auto_socket

    @property
    def ip_address(self):
        # Remote IP of the connected peer, or None when unavailable / not connected.
        try:
            if empty(self._auto_socket) or empty(self._auto_socket.getpeername()):
                return None
        except Exception as e:
            log.warning("Error while getting peername: %s %s", type(e), str(e))
            return None
        return self._auto_socket.getpeername()[0]

    # Alias: 'connected_ip' reads the same peer address property.
    connected_ip = ip_address

    @property
    def connected_port(self):
        # Remote port of the connected peer, or None when unavailable / not connected.
        try:
            if empty(self._auto_socket) or empty(self._auto_socket.getpeername()):
                return None
        except Exception as e:
            log.warning("Error while getting peername: %s %s", type(e), str(e))
            return None
        return self._auto_socket.getpeername()[1]

    def bind(self, address: Tuple[str, AnyNum] = None, force=False, **kwargs):
        # Bind the active socket (server mode); no-op if already bound unless force=True.
        if self.binded and not force:
            return self.auto_socket
        self.auto_socket.bind(address)
        self.binded = True
        return self.auto_socket

    def listen(self, backlog: int = 10, force=False, **kwargs):
        # Start listening on the active socket; no-op if already listening unless force=True.
        s = self.auto_socket
        if self.listening and not force:
            return s
        self.auto_socket.listen(backlog)
        self.listening = True
        return self.auto_socket

    def post_connect(self, sock: AnySocket):
        # Common post-connection step: log the peer and (re)apply the timeout.
        log.debug("[%s.%s] Connected to host: %s", __name__, self.__class__.__name__, sock.getpeername())
        sock.settimeout(self.timeout)
        return sock

    def v6_fallback(self, ex: Exception = None) -> bool:
        """
        If the current connection appears to be IPv6 and the host also has an IPv4 address,
        switch the address family to AF_INET and return ``True`` (caller should retry).
        Returns ``False`` when no fallback is possible/needed.
        """
        ip = self.ip_address
        if self.family == socket.AF_INET6 or (self.family != socket.AF_INET and not empty(ip) and ip_is_v6(ip)):
            if self.host_v4:
                if ex:
                    log.warning(
                        "[%s.%s] Error while using IPv6. Falling back to v4. %s %s",
                        __name__, self.__class__.__name__, type(ex), str(ex)
                    )
                self.family = socket.AF_INET
                return True
        return False

    def connect(self, force=False, override_ssl=None, _conn_tries=0) -> AnySocket:
        """
        Connect the active socket to ``(host, port)`` (no-op when already connected unless ``force``).
        On OSError: treats 'already connected' as success, otherwise attempts an IPv6->IPv4 fallback
        and reconnects, bounded to 3 tries via ``_conn_tries``.
        """
        if not self.connected or force:
            sock = self.socket
            if self.use_ssl and override_ssl in [None, True]:
                sock = self.ssl_socket
            log.debug("[%s.%s] Connecting to host %s on port %s", __name__, self.__class__.__name__, self.host, self.port)
            try:
                _conn_tries += 1
                sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
            except OSError as e:
                if 'already connected' in str(e):
                    log.debug("[%s.%s] Got OSError. Already connected. %s - %s", __name__, self.__class__.__name__, type(e), str(e))
                    self.connected = True
                    return self.post_connect(self.auto_socket)
                if _conn_tries >= 3:
                    raise e
                if not self.v6_fallback(e):
                    log.warning("[%s.%s] Got OSError. Resetting. %s - %s", __name__, self.__class__.__name__, type(e), str(e))
                return self.reconnect(force=True, override_ssl=override_ssl, _conn_tries=_conn_tries)
            self.connected = True
            if self.use_ssl:
                self.ssl_socket = sock
            else:
                self.socket = sock
            return self.post_connect(sock)
        sock = self.ssl_socket if self.use_ssl and override_ssl in [None, True] else self.socket
        return self.post_connect(sock)

    def reconnect(self, force=True, override_ssl=None, _conn_tries=0) -> AnySocket:
        # Tear down any existing connection, then connect fresh.
        if self.connected or force:
            self.disconnect()
        return self.connect(force=True, override_ssl=override_ssl, _conn_tries=_conn_tries)

    async def reconnect_async(self, force=True, override_ssl=None, _conn_tries=0) -> AnySocket:
        # Async variant of reconnect().
        if self.connected or force:
            self.disconnect()
        return await self.connect_async(force=True, override_ssl=override_ssl, _conn_tries=_conn_tries)

    async def connect_async(self, force=False, override_ssl=None, _conn_tries=0) -> AnySocket:
        """
        AsyncIO variant of :meth:`.connect` - uses ``loop.sock_connect`` wrapped in
        :func:`asyncio.wait_for` (timeout + small grace period), with the same
        'already connected' handling, IPv6->IPv4 fallback and bounded retries.
        """
        if not self.connected or force:
            sock = self.socket
            if self.use_ssl and override_ssl in [None, True]:
                sock = self.ssl_socket
            log.debug("[async] [%s.%s] Connecting to host %s on port %s (timeout: %s)", __name__, self.__class__.__name__,
                      self.host, self.port, self.timeout)
            try:
                _conn_tries += 1
                sock.settimeout(self.timeout)
                await asyncio.wait_for(self.loop.sock_connect(sock, (self.host, self.port)), self.timeout + 0.1)
            except (OSError, asyncio.TimeoutError) as e:
                if 'already connected' in str(e):
                    log.debug("[%s.%s] Got OSError. Already connected. %s - %s", __name__, self.__class__.__name__, type(e), str(e))
                    self.connected = True
                    return self.post_connect(self.auto_socket)
                if _conn_tries >= 3:
                    raise e
                if not self.v6_fallback(e):
                    log.warning("[%s.%s] Got OSError. Resetting. %s - %s", __name__, self.__class__.__name__, type(e), str(e))
                return await self.reconnect_async(force=True, override_ssl=override_ssl, _conn_tries=_conn_tries)
            self.connected = True
        sock = self.ssl_socket if self.use_ssl and override_ssl in [None, True] else self.socket
        return self.post_connect(sock)

    def _shutdown(self, sck):
        # Best-effort shutdown: 'not connected' errors are silently ignored, everything else logged.
        try:
            sck.shutdown(socket.SHUT_RDWR)
        except OSError as e:
            if 'not connected' in str(e): return
            log.warning("OSError while shutting down socket: %s %s", type(e), str(e))
        except Exception as e:
            log.warning("Exception while shutting down socket: %s %s", type(e), str(e))

    def _close(self, sck):
        # Best-effort close: failures are logged, never raised.
        try:
            sck.close()
        except OSError as e:
            log.warning("OSError while closing socket: %s %s", type(e), str(e))
        except Exception as e:
            log.warning("Exception while closing socket: %s %s", type(e), str(e))

    def disconnect(self):
        """
        Reset all connection state flags and shut down + close both the plain and SSL sockets
        (best-effort). Returns ``True`` on success, ``False`` if an unexpected error occurred.
        """
        self.connected, self.binded, self.listening = False, False, False
        try:
            log.debug("[%s.%s] Disconnecting socket for host %s on port %s", __name__, self.__class__.__name__, self.host, self.port)
            if self._socket:
                self._shutdown(self._socket)
                self._close(self._socket)
                self._socket = None
            if self._ssl_socket:
                self._shutdown(self._ssl_socket)
                self._close(self._ssl_socket)
                self._ssl_socket = None
            return True
        except Exception:
            log.exception("error while closing socket")
            return False

    @classmethod
    def duplicate(cls, inst: "SocketTracker", **kwargs) -> "SocketTracker":
        """
        Create a fresh, unconnected tracker sharing ``inst``'s connection settings
        (host/port/timeout/SSL config), with per-field overrides via ``kwargs``.
        """
        cfg = dict(
            host=inst.host, port=inst.port, timeout=inst.timeout, server=inst.server, use_ssl=inst.use_ssl,
            socket_conf=inst.socket_conf, ssl_conf=inst.ssl_conf, ssl_wrap_conf=inst.ssl_wrap_conf
        )
        cfg = {**cfg, **kwargs}
        return cls(**cfg)

    def __enter__(self):
        # NOTE(review): original indentation was ambiguous here - assumed the reset + (re)connect
        # only happens on the outermost layer (virtual_layer == 0), which matches LayeredContext's
        # purpose of making nested 'with' blocks safe.
        if self.socket_layer_ctx.virtual_layer == 0:
            self._socket_layer_ctx = None
            if self.connected:
                self.reconnect()
            elif not self.connected: self.connect()
        self.socket_layer_ctx.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only disconnect when leaving the last (outermost) layer.
        if self.socket_layer_ctx.virtual_layer <= 1:
            self.disconnect()
        self.socket_layer_ctx.__exit__(exc_type, exc_val, exc_tb)

    async def __aenter__(self):
        # Async mirror of __enter__ (same outermost-layer assumption - see NOTE there).
        if self.socket_layer_ctx.virtual_layer == 0:
            self._socket_layer_ctx = None
            if self.connected:
                await self.reconnect_async()
            elif not self.connected: await self.connect_async()
        await self.socket_layer_ctx.__aenter__()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Only disconnect when leaving the last (outermost) layer.
        if self.socket_layer_ctx.virtual_layer <= 1:
            self.disconnect()
        await self.socket_layer_ctx.__aexit__(exc_type, exc_val, exc_tb)

    def __getattr__(self, item):
        # Unknown attributes are proxied to the active socket, so the tracker can be used
        # anywhere a socket object is expected (recv, send, fileno, etc.).
        try:
            return super().__getattribute__(item)
        except AttributeError:
            pass
        sock: AnySocket = super().__getattribute__('auto_socket')
        return getattr(sock, item)
class SocketWrapper(object):
    """
    A wrapper class to make working with :class:`socket.socket` much simpler.

    .. NOTE:: For AsyncIO, use :class:`.AsyncSocketWrapper` instead.

    **Features**

    * Automatic address family detection - detects whether you have working IPv4 / IPv6, and decides the best way
      to connect to a host, depending on what IP versions that host supports
    * ``Happy Eyeballs`` for IPv6. If something goes wrong with an IPv6 connection, it will fallback to IPv4 if the
      host has it available (i.e. a domain with both ``A`` and ``AAAA`` records)
    * Easy to use SSL, which works with HTTPS and other SSL-secured protocols. Just pass ``use_ssl=True`` in the constructor.
    * Many wrapper methods such as :meth:`.recv_eof`, :meth:`.query`, and :meth:`.http_request` to make working
      with sockets much easier.

    **Examples**

    Send a string of bytes / text to a server, and then read until EOF::

        >>> sw = SocketWrapper('icanhazip.org', 80)
        >>> res = sw.query("GET / HTTP/1.1\\nHost: icanhazip.com\\n\\n")
        >>> print(res)
        HTTP/1.1 200 OK
        Server: nginx
        Content-Type: text/plain; charset=UTF-8
        Content-Length: 17
        x-rtfm: Learn about this site at http://bit.ly/icanhazip-faq and do not abuse the service.
        2a07:e00::abc

    For basic HTTP requests, you can use :meth:`.http_request`, which will automatically send ``Host`` (based on the host you passed),
    and ``User-Agent``. SSL works too, just set ``use_ssl=True``::

        >>> sw = SocketWrapper('myip.privex.io', 443, use_ssl=True)
        >>> res = sw.http_request('/?format=json')
        >>> print(res)
        HTTP/1.1 200 OK
        Server: nginx
        Date: Tue, 22 Sep 2020 03:40:48 GMT
        Content-Type: application/json
        Content-Length: 301
        Connection: close
        Access-Control-Allow-Origin: *
        {"error":false,"geo":{"as_name":"Privex Inc.","as_number":210083,"city":"Stockholm","country":"Sweden",
        "country_code":"SE","error":false,"zip":"173 11"},"ip":"2a07:e00::abc","ip_type":"ipv6","ip_valid":true,
        "messages":[], "ua":"Python Privex Helpers ( https://github.com/Privex/python-helpers )"}

    Standard low-level sending and receiving data::

        >>> sw = SocketWrapper('127.0.0.1', 8888)
        >>> sw.sendall(b"hello world") # Send the text 'hello world'
        >>> sw.recv(64) # read up to 64 bytes of data from the socket
        b"lorem ipsum\n"
    """
    # Fall back to the helpers' default socket timeout when Python has no global default.
    DEFAULT_TIMEOUT = empty_if(socket.getdefaulttimeout(), settings.DEFAULT_SOCKET_TIMEOUT, zero=True)
    # Legacy annotations - actual socket/SSL state now lives on ``self.tracker``.
    _context: Optional[ssl.SSLContext]
    _socket: OpAnySocket
    _base_socket: Optional[socket.socket]
    _ssl_socket: Optional[ssl.SSLSocket]
    _layer_context: Optional[LayeredContext]
    _socket_ctx_mgr: SocketContextManager
    # connected: bool
    # Behaviour flags set by __init__ (connect-on-demand, server auto-listen, backlog size).
    auto_connect: bool
    auto_listen: bool
    listen_backlog: int
    # The tracker holds the real sockets, SSL context and connection state.
    tracker: SocketTracker
    def __init__(
        self, host: str, port: int, server=False, family=-1, type=socket.SOCK_STREAM, proto=-1, fileno=None,
        timeout=DEFAULT_TIMEOUT, use_ssl=False, verify_cert=False, **kwargs
    ):
        """
        Construct a SocketWrapper for ``host``:``port``.

        :param str host: Hostname or IP address to connect to (or bind when ``server=True``).
        :param int port: Port number.
        :param bool server: When truthy, this wrapper binds/listens rather than connecting out.
        :param family: Socket address family; ``-1`` enables auto-detection (see below).
        :param type: Socket type (default: ``socket.SOCK_STREAM``).
        :param proto: Socket protocol number (``-1`` = default).
        :param fileno: Existing file descriptor to build the socket from.
        :param timeout: Default socket timeout in seconds.
        :param bool use_ssl: Wrap the socket in SSL/TLS.
        :param bool verify_cert: Verify the remote certificate when using SSL.

        Key kwargs: ``auto_connect``, ``auto_listen``, ``listen_backlog``, ``read_timeout``,
        ``send_timeout``, ``check_connectivity``, plus pre-made ``socket`` / ``base_socket`` /
        ``ssl_socket`` / ``ssl_context`` objects and SSL tuning options which are forwarded
        to the internal :class:`.SocketTracker`.
        """
        self.host, self.port = host, int(port)
        self.server = is_true(server)
        # if self.server and (empty(type) or type == -1):
        #     type = socket.SOCK_STREAM
        # self._socket = kwargs.get('socket', None)
        # self._base_socket = kwargs.get('base_socket', None)
        # self._ssl_socket = kwargs.get('ssl_socket', None)
        _context = kwargs.get('ssl_context', None)
        # self.connected = not (self._socket is None)
        binded, listening = kwargs.get('binded', False), kwargs.get('listening', False)
        check_connectivity = kwargs.get('check_connectivity', settings.CHECK_CONNECTIVITY)
        self.auto_connect = kwargs.get('auto_connect', True)
        self.error_reconnect = kwargs.get('error_reconnect', True)
        self.auto_listen = kwargs.get('auto_listen', True)
        self.listen_backlog = kwargs.get('listen_backlog', 10)
        self.read_timeout = kwargs.get('read_timeout', settings.DEFAULT_READ_TIMEOUT)
        self.send_timeout = kwargs.get('send_timeout', settings.DEFAULT_WRITE_TIMEOUT)
        # Imported locally to avoid a circular import with privex.helpers.net.common.
        from privex.helpers.net.common import check_v4_async, check_v6_async
        # If the host is a literal IP, the address family follows its IP version directly.
        if family == -1 and is_ip(host):
            log.debug("Host '%s' appears to be an IP. Automatically setting address family based on IP.", host)
            family = ip_sock_ver(host)
        # Otherwise (domain name), pick v6/v4 based on the host's DNS records combined with
        # this system's actual IPv6/IPv4 connectivity ("happy eyeballs"-style selection).
        if family == -1 and check_connectivity:
            host_v4 = resolve_ip(host, 'v4')
            host_v6 = resolve_ip(host, 'v6')
            if host_v6 is not None and run_coro_thread(check_v6_async):
                log.debug("Domain %s has one or more IPv6 addresses, and current system appears to have IPv6 connectivity. "
                          "Using domain's IPv6 address: %s", host, host_v6)
                family = socket.AF_INET6
            elif host_v4 is not None and run_coro_thread(check_v4_async):
                log.debug("Domain %s has one or more IPv4 addresses, and current system appears to have IPv4 connectivity. "
                          "Using domain's IPv4 address: %s", host, host_v4)
                family = socket.AF_INET
        # self.use_ssl = use_ssl
        # self.socket_conf = dict(family=family, type=type, proto=proto, fileno=fileno)
        # self.ssl_wrap_conf = dict(
        #     server_hostname=kwargs.get('server_hostname'),
        #     session=kwargs.get('session'),
        #     do_handshake_on_connect=kwargs.get('do_handshake_on_connect', True)
        # )
        # self.ssl_conf = dict(
        #     verify_cert=verify_cert,
        #     check_hostname=kwargs.get('check_hostname'),
        #     verify_mode=kwargs.get('verify_mode')
        # )
        # sck = self._socket if self._socket is not None else socket.socket(**self.socket_conf)
        # All socket/SSL state is delegated to the tracker from here on.
        self.tracker = SocketTracker(
            self.host, self.port,
            timeout=timeout, server=server, binded=binded, connected=kwargs.get('connected', False),
            listening=listening, use_ssl=use_ssl,
            socket_conf=dict(family=family, type=type, proto=proto, fileno=fileno),
            ssl_conf=dict(
                verify_cert=verify_cert,
                check_hostname=kwargs.get('check_hostname'),
                verify_mode=kwargs.get('verify_mode')
            ),
            ssl_wrap_conf=dict(
                server_hostname=kwargs.get('server_hostname'),
                session=kwargs.get('session'),
                do_handshake_on_connect=kwargs.get('do_handshake_on_connect', True)
            ), hostname=kwargs.get('hostname', None)
        )
        # Pre-made socket objects passed by the caller are installed onto the tracker.
        _socket = kwargs.get('socket', None)
        _base_socket = kwargs.get('base_socket', None)
        _ssl_socket = kwargs.get('ssl_socket', None)
        if _context is not None: self.tracker.ssl_context = _context
        if _socket is not None: self.tracker.socket = _socket
        if _base_socket is not None: self.tracker.socket = _base_socket
        if _ssl_socket is not None: self.tracker.ssl_socket = _ssl_socket
        # self._timeout = float(timeout)
        self._layer_context = None
        self._socket_ctx_mgr = SocketContextManager(self)
        # if use_ssl:
        #     ctx = get_ssl_context(**ssl_params)
        #     s = ctx.wrap_socket(
        #         server_hostname=kwargs.get('server_hostname'),
        #         session=kwargs.get('session'),
        #         do_handshake_on_connect=kwargs.get('do_handshake_on_connect', True),
        #     )
    @property
    def ssl_conf(self) -> dict:
        """SSL context settings (verify_cert / check_hostname / verify_mode) — stored on the tracker."""
        return self.tracker.ssl_conf
    @ssl_conf.setter
    def ssl_conf(self, value):
        self.tracker.ssl_conf = value
    @property
    def ssl_wrap_conf(self) -> dict:
        """Arguments passed to :meth:`ssl.SSLContext.wrap_socket` — stored on the tracker."""
        return self.tracker.ssl_wrap_conf
    @ssl_wrap_conf.setter
    def ssl_wrap_conf(self, value):
        self.tracker.ssl_wrap_conf = value
    @property
    def socket_conf(self) -> dict:
        """Arguments used to construct new :class:`socket.socket` instances — stored on the tracker."""
        return self.tracker.socket_conf
    @socket_conf.setter
    def socket_conf(self, value):
        self.tracker.socket_conf = value
    @property
    def timeout(self):
        """Default socket timeout in seconds — stored on the tracker."""
        return self.tracker.timeout
    @property
    def _auto_socket(self):
        """Raw access to whichever socket (plain or SSL) the tracker currently holds, without creating one."""
        return self.tracker._auto_socket
    @timeout.setter
    def timeout(self, value):
        # Apply the timeout to the live socket as well as persisting it on the tracker.
        self.socket.settimeout(value)
        self.tracker.timeout = value
        # self.base_socket.settimeout(value)
        # if self._socket:
        #     self._socket.settimeout(value)
        # self._timeout = value
def _make_context(self, **kwargs) -> ssl.SSLContext:
cnf = {**self.ssl_conf, **kwargs}
return get_ssl_context(**cnf)
def _make_socket(self, **kwargs) -> socket.socket:
cnf = {**self.socket_conf, **kwargs}
# if self.server:
# if 'family' in cnf: del cnf['family']
# if 'type' in cnf: del cnf['type']
# if 'proto' in cnf: del cnf['proto']
# if 'fileno' in cnf: del cnf['fileno']
# log.info("socket host: %s || port: %s", self.host, self.port)
# log.info("socket extra config: %s", cnf)
# return socket.create_server((self.host, self.port), **cnf)
return socket.socket(**cnf)
def _ssl_wrap_socket(self, sock: socket.socket = None, ctx: ssl.SSLContext = None, **kwargs) -> ssl.SSLSocket:
cnf = {**self.ssl_wrap_conf, **kwargs}
ctx = empty_if(ctx, self.context, itr=True, zero=True)
sock = empty_if(sock, self.base_socket, itr=True, zero=True)
return ctx.wrap_socket(sock, **cnf)
def _select_socket(self, new_sock=False, **kwargs) -> Union[ssl.SSLSocket, "socket.socket"]:
if new_sock:
sock = self._make_socket()
if kwargs.get('use_ssl', self.use_ssl):
sock = self._ssl_wrap_socket(sock, **kwargs)
return sock
if self.use_ssl:
return self._ssl_wrap_socket(**kwargs)
return self.base_socket
    @property
    def hostname(self):
        """Hostname used for SNI / HTTP ``Host`` headers — stored on the tracker."""
        return self.tracker.hostname
    @hostname.setter
    def hostname(self, value):
        self.tracker.hostname = value
    @property
    def context(self) -> ssl.SSLContext:
        """Lazily-created :class:`ssl.SSLContext`, cached on the tracker."""
        # if not self._context:
        #     self._context = self._make_context()
        # return self._context
        if not self.tracker.ssl_context:
            self.tracker.ssl_context = self._make_context()
        return self.tracker.ssl_context
    # Alias so the context is reachable under both names.
    ssl_context = context
    @property
    def base_socket(self) -> socket.socket:
        """The plain (non-SSL) socket, lazily created from :attr:`.socket_conf`."""
        if not self.tracker.socket:
            self.tracker.socket = self._make_socket()
        return self.tracker.socket
    @base_socket.setter
    def base_socket(self, value: socket.socket):
        self.tracker.socket = value
    @property
    def socket(self) -> AnySocket:
        """The active socket — SSL-wrapped when SSL is enabled, otherwise the plain socket."""
        # if not self._socket:
        #     self._socket = self._select_socket()
        #     if not self.server: self._socket.settimeout(self.timeout)
        return self.tracker.auto_socket
    @socket.setter
    def socket(self, value: AnySocket):
        # Route the assignment to whichever slot matches the SSL setting.
        if self.tracker.use_ssl:
            self.tracker.ssl_socket = value
        else:
            self.tracker.socket = value
        # self._socket = value
    @property
    def connected(self):
        """Whether the tracker believes the connection is currently established."""
        return self.tracker.connected
    # @connected.setter
    # def connected(self, value):
    #     self.tracker.connected = value
    def _connect_sanity(self, host, port, sock: OpAnySocket = None, **kwargs):
        """
        Pre-connect sanity check.

        Returns ``True`` when the caller should proceed with a real connect.
        When we're already connected (and no explicit ``sock`` was given), returns
        the existing socket — or triggers a reconnect if host/port differ from
        the stored ones. Raises :class:`ValueError` when no usable port exists.
        """
        port = int(port)
        sck = self.socket if sock is None else sock
        if sock is None and self.connected and self.socket is not None:
            if host != self.host or port != int(self.port):
                log.debug(f"Already connected, but {self.__class__.__name__}.connect called with different host/port than stored. "
                          f"Trigerring a reconnect.")
                return self.reconnect(host, port, sock=sck)
            log.debug(f"Already connected, {self.__class__.__name__}.connect called with same details as previously. "
                      f"Returning existing socket.")
            return sck
        if empty(port, True, True):
            raise ValueError(f"{self.__class__.__name__}.connect requires a port. Either connect(host, port) or connect( (host,port) )")
        return True
    def _connect(self, host: str, port: AnyNum, sock: OpAnySocket = None, **kwargs) -> AnySocket:
        """
        Low-level connect / bind+listen.

        Server mode: bind (and auto-listen when enabled) then return.
        Client mode: call ``.connect((host, port))`` on the target.

        NOTE: when no explicit ``sock`` is given, the *tracker* is used as the target —
        its ``__getattr__`` proxies ``connect()`` through to the underlying socket while
        keeping the tracker's connection state in sync.
        """
        port = int(port)
        sck = self.tracker if sock is None else sock
        if self.server:
            log.debug("Binding to host '%s' on port %s", host, port)
            self.bind(host, port, sock=sock)
            log.debug("Successfully binded to host '%s' on port %s", host, port)
            if self.auto_listen:
                log.debug("Auto-listen is enabled. Calling %s.listen(%s)", self.__class__.__name__, self.listen_backlog)
                self.listen(self.listen_backlog, sock=sock)
                log.debug("%s is now listening on host(s) '%s' on port %s", self.__class__.__name__, host, port)
            # if sock is None: self.host, self.port, self.connected = host, port, True
            return sck
        log.debug("[%s.%s] Connecting to host %s on port %s", self.__class__.__name__, __name__, host, port)
        sck.connect((host, port))
        # if sock is None: self.host, self.port, self.connected = host, port, True
        return sck
def _get_addr(self, host: Union[str, Tuple[str, AnyNum]] = None, port: AnyNum = None) -> Tuple[str, int]:
csn = self.__class__.__name__
if host is None:
if self.host is None: raise ValueError(f"No host specified to {csn}.reconnect(host, port) - and no host in {csn}.host")
host = self.host
if port is None:
if self.port is None: raise ValueError(f"No port specified to {csn}.connect(host, port) - and no port in {csn}.port")
port = self.port
if isinstance(host, (list, set)): host = tuple(host)
if isinstance(host, tuple): host, port = host
return host, int(port)
def bind(self, host: Union[str, Tuple[str, AnyNum]] = None, port: AnyNum = None, sock: OpAnySocket = None, **kwargs):
sck = self.socket if sock is None else sock
if sock is None and self.binded:
return
sck.bind(self._get_addr(host, port))
if sock is None: self.binded = True
return True
    def connect(self, host: Union[str, Tuple[str, AnyNum]] = None, port: AnyNum = None, sock: OpAnySocket = None, **kwargs) -> AnySocket:
        """
        Connect (or, in server mode, bind/listen) to ``host:port``.

        Host/port default to the instance attributes; a ``(host, port)`` tuple may be
        passed as ``host``. If already connected to the same address, the existing
        socket is returned; a different address triggers a reconnect.
        """
        # csn = self.__class__.__name__
        #
        # if host is None:
        #     if self.host is None: raise ValueError(f"No host specified to {csn}.reconnect(host, port) - and no host in {csn}.host")
        #     host = self.host
        # if port is None:
        #     if self.port is None: raise ValueError(f"No port specified to {csn}.connect(host, port) - and no port in {csn}.port")
        #     port = self.port
        # if isinstance(host, (list, set)): host = tuple(host)
        # if isinstance(host, tuple): host, port = host
        host, port = self._get_addr(host, port)
        sanity = self._connect_sanity(host, port, sock=sock)
        # Anything other than True is an already-usable socket - return it as-is.
        if sanity is not True: return sanity
        return self._connect(host, port, sock=sock)
    def reconnect(self, host: Union[str, Tuple[str, AnyNum]] = None, port: AnyNum = None, sock: OpAnySocket = None, **kwargs):
        """
        Force a reconnect via the tracker, optionally switching to a new ``host``/``port``.

        Missing host/port fall back to the stored instance values; the tracker's
        target address is updated before :meth:`SocketTracker.reconnect` is called.
        Returns the tracker (which proxies socket methods).
        """
        csn = self.__class__.__name__
        # self.close(sock=sock)
        if host is None:
            if port is not None:
                if self.host is None:
                    raise ValueError(f"No host specified to {csn}.reconnect(host, port) - and no host in {csn}.host")
                # return self.connect(self.host, port, sock=sock, **kwargs)
                host = self.host
                # self.tracker.host, self.tracker.port = self.host, port
                # self.tracker.reconnect()
                # return self.tracker
            # if all([self.host is not None, self.port is not None]):
            #     return self.connect(self.host, self.port, sock=sock, **kwargs)
            # self.tracker.host, self.tracker.port = self.host,
            # self.tracker.connect()
            # return self.tracker
        elif port is None:
            port = self.port
            # self.tracker.host, self.tracker.port = host, port
        # else:
        # NOTE(review): when BOTH host and port are None, this assigns (None, None)
        # to the tracker's target - presumably SocketTracker.reconnect falls back to
        # its stored address in that case; confirm against the tracker implementation.
        self.tracker.host, self.tracker.port = host, port
        # return self.connect(host, port, sock=sock, **kwargs)
        self.tracker.reconnect()
        return self.tracker
        # return self.connect(host, self.port, sock=sock, **kwargs)
def listen(self, backlog=10, sock: OpAnySocket = None, **kwargs):
if self.listening:
return True
(self.socket if sock is None else sock).listen(backlog)
if sock is None: self.listening = True
return True
@_sockwrapper_auto_connect()
def accept(self, sock: OpAnySocket = None, **kwargs) -> Tuple[AnySocket, Tuple[str, int]]:
return (self.socket if sock is None else sock).accept()
@_sockwrapper_auto_connect()
def settimeout(self, value, sock: OpAnySocket = None, **kwargs):
return (self.socket if sock is None else sock).settimeout(value)
    def close(self, sock: OpAnySocket = None):
        """
        Close the connection.

        When ``sock`` is given, only that socket is closed (exceptions are logged,
        not raised). Otherwise the tracker shuts down and clears all of its sockets.
        """
        log.debug("Closing socket connection to host: %s || port: %s", self.host, self.port)
        if sock is not None:
            log.debug(" !! sock was specified. only closing sock.")
            try:
                sock.close()
                log.debug("Closed sock.")
            except Exception:
                # Best-effort close - a failure here shouldn't propagate to the caller.
                log.exception("error while closing sock")
            return
        self.tracker.disconnect()
        # try:
        #     if self._socket is not None:
        #         log.debug("closing self.socket")
        #         self.socket.close()
        # except Exception:
        #     log.exception("error while closing self.socket")
        # try:
        #     if self._base_socket is not None:
        #         log.debug("closing self.base_socket")
        #         self.base_socket.close()
        # except Exception:
        #     log.exception("error while closing self.base_socket")
        #
        # try:
        #     if self._ssl_socket is not None:
        #         self._ssl_socket.close()
        #         log.debug("closing self._ssl_socket")
        # except Exception:
        #     log.exception("error while closing self._ssl_socket")
        # self.connected = False
        # log.debug("setting socket instance attributes to None")
        # self._socket, self._ssl_socket, self._base_socket = None, None, None
@_sockwrapper_auto_connect()
def recv(self, bufsize: int, flags: int = None, sock: OpAnySocket = None, **kwargs) -> bytes:
if flags is None: return (self.socket if sock is None else sock).recv(bufsize)
return (self.socket if sock is None else sock).recv(bufsize, flags)
@_sockwrapper_auto_connect()
def recvfrom(self, bufsize: int, flags: int = None, sock: OpAnySocket = None, **kwargs) -> Tuple[bytes, Any]:
if flags is None: return (self.socket if sock is None else sock).recvfrom(bufsize)
return (self.socket if sock is None else sock).recvfrom(bufsize, flags)
@_sockwrapper_auto_connect()
def recvmsg(
self, bufsize: int, ancbufsize:int = None, flags: int = None, sock: OpAnySocket = None, **kwargs
) -> Tuple[bytes, List[Tuple[int, int, bytes]], int, Any]:
args = [bufsize]
if ancbufsize is not None: args.append(ancbufsize)
if flags is not None: args.append(flags)
return (self.socket if sock is None else sock).recvmsg(*args)
@_sockwrapper_auto_connect()
def read_eof(
self, bufsize: int = 256, eof_timeout: AnyNum = 120, flags: int = None, timeout_fail=False, strip=True,
conv: Optional[Callable[[Union[bytes, str]], T]] = stringify, sock: OpAnySocket = None, **kwargs
) -> Union[bytes, str, T]:
strip_func = kwargs.get('strip_func', lambda d: strip_null(d, conv=conv))
data = b''
total_time = 0.0
while True:
st_time = time.time()
chunk = self.recv(bufsize, flags, sock=sock)
if not chunk:
log.debug("Finished reading until EOF")
break
e_time = time.time()
total_time += (e_time - st_time)
data += chunk
if total_time > eof_timeout:
log.error("Giving up, spent over %f seconds (%f) reading until EOF for host %s", eof_timeout, total_time, self.host)
if timeout_fail:
raise TimeoutError(f"Giving up, spent over {eof_timeout} seconds ({total_time}) reading until EOF for host {self.host}")
break
return strip_func(data) if strip else data
@_sockwrapper_auto_connect()
def shutdown(self, how: int = None, sock: OpAnySocket = None, **kwargs):
how = empty_if(how, socket.SHUT_RDWR, itr=True)
return (self.socket if sock is None else sock).shutdown(how)
@_sockwrapper_auto_connect()
def send(self, data: Union[str, bytes], flags: int = None, sock: OpAnySocket = None, **kwargs):
a = [byteify(data)]
if not empty(flags): a.append(flags)
return (self.socket if sock is None else sock).send(*a)
@_sockwrapper_auto_connect()
def sendall(self, data: Union[str, bytes], flags: int = None, sock: OpAnySocket = None, **kwargs):
a = [byteify(data)]
if not empty(flags): a.append(flags)
return (self.socket if sock is None else sock).sendall(*a)
    @_sockwrapper_auto_connect()
    def sendto(self, data: Union[str, bytes], *args, sock: OpAnySocket = None, **kwargs):
        """
        Send ``data`` (str auto-encoded) via :meth:`socket.socket.sendto`.

        NOTE(review): any leftover **kwargs are forwarded to socket.sendto(), which
        accepts no keyword arguments - presumably the decorator consumes them first;
        confirm against _sockwrapper_auto_connect.
        """
        return (self.socket if sock is None else sock).sendto(byteify(data), *args, **kwargs)
@_sockwrapper_auto_connect()
def send_chunks(self, gen: Union[Iterable, Generator], flags: int = None, sock: OpAnySocket = None, **kwargs):
results = []
for c in gen:
results.append(self.send(c, flags, sock=sock, **kwargs))
return results
# @_sockwrapper_auto_connect()
# def query(self, data: Union[str, bytes], bufsize: int = 32, eof_timeout=30, **kwargs):
# timeout_fail, send_flags = kwargs.get('timeout_fail'), kwargs.get('send_flags', kwargs.get('flags', None))
# recv_flags = kwargs.get('recv_flags', kwargs.get('flags', None))
# log.debug(" >> Sending %s bytes to %s:%s", len(data), self.host, self.port)
# self.sendall(byteify(data), flags=send_flags)
# log.debug(" >> Reading %s bytes per chunk from %s:%s", bufsize, self.host, self.port)
# return self.read_eof(bufsize, eof_timeout=eof_timeout, flags=recv_flags, timeout_fail=timeout_fail)
# @_sockwrapper_auto_connect()
# def http_request(
# self, url="/", host=AUTO_DETECTED, method="GET", user_agent=DEFAULT_USER_AGENT, extra_data: Union[STRBYTES, List[str]] = None,
# body: STRBYTES = None, eof_timeout=30, **kwargs
# ) -> Union[bytes, Awaitable[bytes]]:
# bufsize, flags, timeout_fail = kwargs.pop('bufsize', 256), kwargs.pop('flags', None), kwargs.pop('timeout_fail', False)
# data = self._http_request(url, host=host, method=method, user_agent=user_agent, extra=extra_data, body=body, **kwargs)
# self.sendall(data, flags=flags)
# return self.read_eof(bufsize, eof_timeout=eof_timeout, flags=flags, timeout_fail=timeout_fail)
def _http_request(self, url, host: str, method: str, user_agent: str = settings.DEFAULT_USER_AGENT, extra=None, **kwargs) -> bytes:
host = self.hostname if host == AUTO_DETECTED else host
return generate_http_request(url, host, method=method, user_agent=user_agent, extra_data=extra, **kwargs)
    @_sockwrapper_auto_connect()
    def query(self, data: Union[str, bytes], bufsize: int = 32, eof_timeout=30, sock: OpAnySocket = None, **kwargs):
        """
        Send ``data`` then read the full response until EOF.

        ``flags`` (or the separate ``send_flags`` / ``recv_flags``) control the send
        and receive flags; ``timeout_fail`` makes a slow read raise instead of stopping.
        Remaining kwargs are forwarded to :meth:`.read_eof`.
        """
        timeout_fail, send_flags = kwargs.pop('timeout_fail', False), kwargs.pop('send_flags', kwargs.get('flags', None))
        recv_flags = kwargs.pop('recv_flags', kwargs.pop('flags', None))
        log.debug(" >> Sending %s bytes to %s:%s", len(data), self.host, self.port)
        self.sendall(byteify(data), flags=send_flags, sock=sock)
        log.debug(" >> Reading %s bytes per chunk from %s:%s", bufsize, self.host, self.port)
        return self.read_eof(bufsize, eof_timeout=eof_timeout, flags=recv_flags, timeout_fail=timeout_fail, sock=sock, **kwargs)
    @_sockwrapper_auto_connect()
    def http_request(
        self, url="/", host=AUTO_DETECTED, method="GET", user_agent=settings.DEFAULT_USER_AGENT,
        extra_data: Union[STRBYTES, List[str]] = None, body: STRBYTES = None, eof_timeout=30, bufsize: int = 256,
        conv: Optional[Callable[[Union[bytes, str]], T]] = stringify, sock: OpAnySocket = None, **kwargs
    ) -> Union[str, bytes, T]:
        """
        Issue a plain HTTP request over the socket and return the raw response.

        ``Host`` defaults to the auto-detected hostname; ``extra_data`` adds raw header
        lines and ``body`` the request body. The response is read until EOF and
        converted via ``conv`` (default: decoded to str).
        """
        data = self._http_request(url, host=host, method=method, user_agent=user_agent, extra=extra_data, body=body, **kwargs)
        kargs = dict(data=data, bufsize=bufsize, eof_timeout=eof_timeout, timeout_fail=kwargs.get('timeout_fail', False), conv=conv,
                     sock=sock, **kwargs)
        # With an explicit sock there's no connection lifecycle to manage here.
        if sock is not None: return self.query(**kargs)
        # with self:
        # Use the tracker as a context manager so the connection is opened/closed around the query.
        with self.tracker:
            return self.query(**kargs)
@_sockwrapper_auto_connect()
def setblocking(self, flag: bool, sock: OpAnySocket = None, **kwargs):
return (self.socket if sock is None else sock).setblocking(flag)
    def handle_connection(
        self, sock: AnySocket, addr: Tuple[str, int], callback: Callable[["SocketWrapper", Tuple[str, int]], Any],
        stop_return: Union[str, bytes] = None,
        **kwargs
    ):
        """
        Handle one accepted connection by invoking ``callback(wrapper, addr)``.

        When ``stop_return`` is set, the callback's (stringified) return value is
        compared against it - either containment ('in'/'contains'/etc.) or equality
        per ``stop_compare`` - and a match raises :class:`StopLoopOnMatch` so the
        calling accept-loop knows to stop. Returns the callback's original result.
        """
        stop_compare, stop_compare_lower = kwargs.get('stop_compare', 'equal'), kwargs.get('stop_compare_lower', True)
        if stop_return is not None: stop_return = stringify(stop_return)
        log.info("NEW CONNECTION: %s || %s", sock, addr)
        log.info("Running callback: %s(%s, %s)\n", callback.__name__, sock, addr)
        # Wrap the accepted raw socket so the callback gets the full wrapper API.
        orig_cres = callback(self.from_socket(sock), addr)
        cres = stringify(orig_cres)
        log.info("Callback return data: %s\n\n", cres)
        if stop_return is not None:
            if stop_compare_lower: stop_return, cres = stop_return.lower(), cres.lower()
            if stop_compare.lower() in ['in', 'contain', 'contains', 'contained', 'within', 'inside']:
                if stop_return in cres or strip_null(stop_return) in strip_null(cres):
                    raise StopLoopOnMatch("Matched stop_return. Parent should stop loop.", cres, stop_compare, stop_compare_lower)
            # NOTE(review): the equality check below also runs after a failed containment
            # check (no elif) - harmless since equality implies containment, but confirm
            # this fall-through is intentional.
            if cres == stop_return or strip_null(cres) == strip_null(stop_return):
                raise StopLoopOnMatch("Matched stop_return. Parent should stop loop.", cres, stop_compare, stop_compare_lower)
        return orig_cres
@_sockwrapper_auto_connect()
def on_connect(
self, callback: Callable[["SocketWrapper", Tuple[str, int]], Any], timeout: AnyNum = None,
stop_return: Union[str, bytes] = None, **kwargs
):
if not self.server:
raise ValueError("This SocketWrapper has 'server' set to False. Can't handle incoming connections.")
if not self.binded: self.bind()
if not self.listening: self.listen(self.listen_backlog)
stop_return_match = None
while self.connected and stop_return_match is None:
log.info("Waiting for incoming connection ( %s:%s || %s ) ...", self.host, self.port, self.socket.getsockname())
sock, addr = self.accept()
try:
self.handle_connection(sock, addr, callback, stop_return, **kwargs)
except StopLoopOnMatch as e:
log.info(" !!! Stopping on_connect as 'stop_return' has been matched: %s", stop_return)
log.info(" !!! The matching message was: %s", e.match)
break
# if stop_return_match is not None:
# log.info(" !!! Stopping on_connect as 'stop_return' has been matched: %s", stop_return)
# log.info(" !!! The matching message was: %s", stop_return_match)
log.info(" !!! Disconnected. Stopping on_connect.")
    class SocketWrapperThread(SafeLoopThread):
        """
        Background thread running the accept/handle loop of a server-mode
        :class:`.SocketWrapper` (see :meth:`SocketWrapper.on_connect_thread`).
        """
        def __init__(self, *args, parent_instance: "SocketWrapper", callback, stop_return, conn_kwargs: dict = None, **kwargs):
            """
            :param parent_instance: The server-mode SocketWrapper whose socket we accept on.
            :param callback: Per-connection callback, called as ``callback(wrapper, addr)``.
            :param stop_return: Callback return value that stops the loop (see handle_connection).
            :param dict conn_kwargs: Extra kwargs forwarded to handle_connection per connection.
            """
            kwargs = dict(kwargs)
            self.parent_instance = parent_instance
            self.callback = callback
            self.conn_kwargs = empty_if(conn_kwargs, {}, itr=True)
            self.stop_return = stop_return
            # stop_compare / stop_compare_lower are consumed here so they don't reach Thread.__init__
            self.stop_compare = kwargs.pop('stop_compare', 'equal')
            self.stop_compare_lower = kwargs.pop('stop_compare_lower', True)
            super().__init__(*args, **kwargs)
        def loop(self):
            """One iteration: accept a connection and dispatch it to handle_connection."""
            pi = self.parent_instance
            log.info("Waiting for incoming connection ( %s:%s || %s ) ...", pi.host, pi.port, pi.socket.getsockname())
            sock, addr = pi.accept()
            try:
                pi.handle_connection(
                    sock, addr, self.callback, self.stop_return,
                    stop_compare=self.stop_compare, stop_compare_lower=self.stop_compare_lower, **self.conn_kwargs
                )
            except StopLoopOnMatch as e:
                log.info(" !!! Stopping on_connect as 'stop_return' has been matched: %s", self.stop_return)
                log.info(" !!! The matching message was: %s", e.match)
                # Signal SafeLoopThread to stop iterating.
                self.emit_stop()
        def run(self):
            """Reconnect the parent's socket inside this thread, then run the loop."""
            self.parent_instance.reconnect()
            return super().run()
def on_connect_thread(
self, callback: Callable[["SocketWrapper", Tuple[str, int]], Any], timeout: AnyNum = None,
stop_return: Union[str, bytes] = None, daemon=True, auto_start=True, **kwargs
) -> SocketWrapperThread:
t = self.SocketWrapperThread(parent_instance=self, callback=callback, stop_return=stop_return, **kwargs)
t.setDaemon(daemon)
if auto_start:
t.start()
return t
    @classmethod
    def from_socket(cls, sock: AnySocket, server=False, **kwargs) -> Union["SocketWrapper", "AsyncSocketWrapper"]:
        """
        Alternate constructor: wrap an existing, already-created socket object.

        Host/port are taken from the socket's own ``getsockname()``, and its
        family/type/proto/fileno are mirrored into the wrapper config. Extra
        kwargs override any of those settings.
        """
        sock_host, sock_port = sock.getsockname()
        cfg = dict(
            family=sock.family, proto=sock.proto, type=sock.type, fileno=sock.fileno(),
            host=sock_host, port=sock_port, server=server, socket=sock, base_socket=sock
        )
        cfg = {**cfg, **kwargs}
        return cls(**cfg)
    def __getattribute__(self, item):
        """
        Attribute fallback: any attribute not found on the wrapper itself is
        proxied to the active socket, so the wrapper can be used like a plain
        :class:`socket.socket`.
        """
        try:
            return super().__getattribute__(item)
        except AttributeError:
            pass
        # Not found on the wrapper - look it up on the active socket instead.
        # (The 'socket' property is fetched via super() to avoid recursing into this method.)
        sock: AnySocket = super().__getattribute__('socket')
        return getattr(sock, item)
    def __enter__(self):
        """Context-manager entry: delegate connection handling to the tracker, return the wrapper."""
        # if not self._socket_ctx_mgr:
        #     self._socket_ctx_mgr = SocketContextManager(self)
        # if not self._layer_context:
        #     self._layer_context = LayeredContext(self._socket_ctx_mgr, max_layers=1)
        # return self._layer_context.__enter__()
        self.tracker.__enter__()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: delegate disconnect handling to the tracker."""
        # return self.tracker.__aexit__()
        # return self._layer_context.__exit__(exc_type, exc_val, exc_tb)
        return self.tracker.__exit__(exc_type, exc_val, exc_tb)
class AsyncSocketWrapper(SocketWrapper):
    """
    AsyncIO version of :class:`.SocketWrapper` — I/O methods are coroutines that
    run on an :class:`asyncio.AbstractEventLoop`.

    >>> from privex.helpers import AsyncSocketWrapper
    >>> sw = AsyncSocketWrapper('termbin.com', 9999)
    >>> url = await sw.query("HELLO world\\n\\nThis is a test\\nusing async sockets\\n\\nwith Python")
    'https://termbin.com/lsd93'
    >>> url = await sw.read_eof()
    """
    # Event loop; lazily resolved by the .loop property when not passed to the constructor.
    _loop: Optional[asyncio.AbstractEventLoop]
    DEFAULT_TIMEOUT = empty_if(socket.getdefaulttimeout(), settings.DEFAULT_SOCKET_TIMEOUT, zero=True)
    def __init__(
        self, host: str, port: int, server=False, family=-1, type=socket.SOCK_STREAM, proto=-1, fileno=None,
        timeout=DEFAULT_TIMEOUT, use_ssl=False, verify_cert=False, loop=None, **kwargs
    ):
        """
        Same parameters as :meth:`SocketWrapper.__init__`, plus ``loop`` — an optional
        :class:`asyncio.AbstractEventLoop` to run socket operations on.
        """
        self._loop = loop
        super().__init__(
            host=host, port=port, server=server, family=family, type=type, proto=proto, fileno=fileno, timeout=timeout,
            use_ssl=use_ssl, verify_cert=verify_cert, **kwargs
        )
    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        """
        The event loop used for all async socket operations (lazily resolved).

        NOTE(review): :func:`asyncio.get_event_loop` is deprecated for implicit loop
        creation on Python 3.10+ — confirm callers always run inside a running loop,
        or pass ``loop=`` to the constructor explicitly.
        """
        if not self._loop:
            self._loop = asyncio.get_event_loop()
        return self._loop
    async def _connect(self, host: str, port: AnyNum, sock: OpAnySocket = None, **kwargs) -> AnySocket:
        """
        Low-level async connect / bind+listen.

        Server mode re-uses the synchronous bind/listen helpers. Client mode either
        connects an explicitly passed ``sock`` via the event loop, or delegates to the
        tracker's async connect and returns its active socket.
        """
        port = int(port)
        # sck = self.socket if sock is None else sock
        if self.server:
            log.debug("Binding to host '%s' on port %s", host, port)
            self.bind(host, port, sock=sock)
            log.debug("Successfully binded to host '%s' on port %s", host, port)
            if self.auto_listen:
                log.debug("Auto-listen is enabled. Calling %s.listen(%s)", self.__class__.__name__, self.listen_backlog)
                self.listen(self.listen_backlog, sock=sock)
                log.debug("%s is now listening on host(s) '%s' on port %s", self.__class__.__name__, host, port)
            # if sock is None: self.host, self.port, self.connected = host, port, True
            return self.socket
        log.debug("Connecting to host %s on port %s", host, port)
        if sock:
            # Caller supplied a socket - connect it non-blockingly on the event loop.
            await self.loop.sock_connect(sock, (host, port))
            return sock
        await self.tracker.connect_async()
        return self.tracker.auto_socket
        # self.loop.soc
        # if sock is None: self.host, self.port, self.connected = host, port, True
async def connect(
self, host: Union[str, Tuple[str, AnyNum]] = None, port: AnyNum = None, sock: OpAnySocket = None, **kwargs) -> AnySocket:
host, port = self._get_addr(host, port)
sanity = await self._connect_sanity(host, port)
if sanity is not True: return sanity
return await self._connect(host, port, sock=sock)
    async def _connect_sanity(self, host, port, sock: OpAnySocket = None, **kwargs):
        """
        Async pre-connect sanity check.

        Returns ``True`` when the caller should proceed with a real connect; otherwise
        returns an existing/provided socket (reconnecting first when host/port differ).

        NOTE(review): unlike the sync version, a non-None ``sock`` is treated as
        already-connected here (``sock is not None or ...``) - confirm this asymmetry
        is intentional before forwarding sockets into this check.
        """
        port = int(port)
        # sock = self._auto_socket if sock is None else sock
        if sock is not None or (self.connected and self._auto_socket is not None):
            if host != self.host or port != int(self.port):
                log.debug(f"Already connected, but {self.__class__.__name__}.connect called with different host/port than stored. "
                          f"Trigerring a reconnect.")
                return await self.reconnect(host, port, sock=sock)
            log.debug(f"Already connected, {self.__class__.__name__}.connect called with same details as previously. "
                      f"Returning existing socket.")
            return sock
        if empty(port, True, True):
            raise ValueError(f"{self.__class__.__name__}.connect requires a port. Either connect(host, port) or connect( (host,port) )")
        return True
    async def reconnect(self, host: Union[str, Tuple[str, AnyNum]] = None, port: AnyNum = None, sock: OpAnySocket = None, **kwargs):
        """
        Close the current connection and reconnect, optionally to a new host/port.

        Missing host/port values fall back to the stored instance attributes.
        """
        csn = self.__class__.__name__
        self.close()
        if host is None:
            if port is not None:
                if self.host is None:
                    raise ValueError(f"No host specified to {csn}.reconnect(host, port) - and no host in {csn}.host")
                return await self.connect(self.host, port, sock=sock)
            if all([self.host is not None, self.port is not None]):
                return await self.connect(self.host, self.port, sock=sock)
        if port is not None:
            return await self.connect(host, port, sock=sock)
        return await self.connect(host, self.port, sock=sock)
    @_async_sockwrapper_auto_connect()
    async def read_eof(
        self, bufsize: int = 256, eof_timeout: AnyNum = 120, flags: int = None, timeout_fail=False, strip=True,
        conv: Optional[Callable[[Union[bytes, str]], T]] = stringify, sock: OpAnySocket = None, **kwargs
    ) -> Union[str, bytes, T]:
        """
        Async: read from the socket in ``bufsize`` chunks until EOF (empty read).

        ``eof_timeout`` limits cumulative read time (empty/None disables the limit);
        ``timeout_fail`` raises :class:`TimeoutError` instead of just stopping.
        When ``strip`` is True the data is null-stripped and converted via ``conv``.
        """
        strip_func = kwargs.get('strip_func', lambda d: strip_null(d, conv=conv))
        data, total_time = b'', 0.0
        while True:
            st_time = time.time()
            chunk = await self.recv(bufsize, flags, timeout=kwargs.get('read_timeout', AUTO), sock=sock)
            if not chunk:
                log.debug("Finished reading until EOF")
                break
            e_time = time.time()
            total_time += (e_time - st_time)
            data += chunk
            # Skip the limit check entirely when eof_timeout is empty/None.
            if not empty(eof_timeout, True) and total_time > eof_timeout:
                log.error("Giving up, spent over %f seconds (%f) reading until EOF for host %s", eof_timeout, total_time, self.host)
                if timeout_fail:
                    raise TimeoutError(f"Giving up, spent over {eof_timeout} seconds ({total_time}) reading until EOF for host {self.host}")
                break
        return strip_func(data) if strip else data
@_async_sockwrapper_auto_connect()
async def recv(self, bufsize: int, flags: int = None, sock: OpAnySocket = None, timeout: Union[float, int] = AUTO, **kwargs) -> bytes:
timeout, sck = self.read_timeout if timeout is AUTO else timeout, self.socket if sock is None else sock
if timeout not in [None, False]:
return await asyncio.wait_for(self.loop.sock_recv(sck, bufsize), timeout)
return await self.loop.sock_recv(sck, bufsize)
@_async_sockwrapper_auto_connect()
async def recv_into(self, buf: bytearray, sock: OpAnySocket = None, **kwargs) -> int:
return await self.loop.sock_recv_into(self.socket if sock is None else sock, buf)
@_async_sockwrapper_auto_connect()
async def send(self, data: Union[str, bytes], flags: int = None, sock: OpAnySocket = None, timeout: Union[float, int] = AUTO, **kwargs):
return await self.send_timeout(data, flags, sock, timeout, **kwargs)
@_async_sockwrapper_auto_connect()
async def sendall(
        self, data: Union[str, bytes], flags: int = None, sock: OpAnySocket = None, timeout: Union[float, int] = AUTO, **kwargs
    ):
        """Transmit all of ``data`` (byteified) over the socket, honouring the wrapper's send timeout."""
        if timeout is AUTO:
            timeout = self.send_timeout
        target = sock if sock is not None else self.socket
        coro = self.loop.sock_sendall(target, byteify(data))
        # NB: 0 == False, so a zero timeout also means "wait forever" (matches original semantics)
        if timeout in (None, False):
            return await coro
        return await asyncio.wait_for(coro, timeout)
@_async_sockwrapper_auto_connect()
async def sendfile(
        self, file: IO[bytes], offset: int = None, count: int = None, fallback: bool = True, sock: OpAnySocket = None,
        timeout: Union[float, int] = AUTO, **kwargs
    ):
        """Send ``file`` over the socket via the event loop's sendfile, honouring the send timeout."""
        if timeout is AUTO:
            timeout = self.send_timeout
        target = sock if sock is not None else self.socket
        coro = self.loop.sock_sendfile(target, file, offset=offset, count=count, fallback=fallback)
        if timeout in (None, False):
            return await coro
        return await asyncio.wait_for(coro, timeout)
@_async_sockwrapper_auto_connect()
async def query(self, data: Union[str, bytes], bufsize: int = 32, eof_timeout=30, sock: OpAnySocket = None, **kwargs):
        """
        Send ``data`` over the socket, then read and return the full response until EOF.

        :param data: payload to transmit (byteified before sending)
        :param bufsize: bytes per receive call while reading the reply
        :param eof_timeout: max total seconds spent reading until EOF (see :meth:`.read_eof`)
        :param sock: optional socket override; defaults to ``self.socket``
        :keyword flags: shared default for both ``send_flags`` and ``recv_flags``
        :keyword timeout: shared default for both ``send_timeout`` and ``read_timeout``
        """
        # NOTE: 'send_flags' uses kwargs.get('flags') (leaving 'flags' in place) while 'recv_flags'
        # *pops* 'flags' afterwards - both default to the same 'flags' value, and it is removed
        # before the remaining kwargs are forwarded to read_eof().
        timeout_fail, send_flags = kwargs.pop('timeout_fail', False), kwargs.pop('send_flags', kwargs.get('flags', None))
        recv_flags = kwargs.pop('recv_flags', kwargs.pop('flags', None))
        # A plain 'timeout' kwarg acts as the default for both the send and read timeouts below.
        shared_timeout = kwargs.pop('timeout', AUTO)
        log.debug(" >> Sending %s bytes to %s:%s", len(data), self.host, self.port)
        snd_tmout, rcv_tmout = kwargs.pop('send_timeout', shared_timeout), kwargs.pop('read_timeout', shared_timeout)
        await self.sendall(byteify(data), flags=send_flags, sock=self.socket if sock is None else sock, timeout=snd_tmout)
        log.debug(" >> Reading %s bytes per chunk from %s:%s", bufsize, self.host, self.port)
        return await self.read_eof(
            bufsize, eof_timeout=eof_timeout, flags=recv_flags, timeout_fail=timeout_fail,
            sock=self.socket if sock is None else sock, read_timeout=rcv_tmout, **kwargs
        )
@_async_sockwrapper_auto_connect()
async def http_request(
        self, url="/", host=AUTO_DETECTED, method="GET", user_agent=settings.DEFAULT_USER_AGENT,
        extra_data: Union[STRBYTES, List[str]] = None, body: STRBYTES = None, eof_timeout=30, bufsize: int = 256,
        conv: Optional[Callable[[Union[bytes, str]], T]] = stringify, sock: OpAnySocket = None, **kwargs
) -> Union[str, bytes, T]:
        """
        Build a raw HTTP request via :meth:`._http_request`, send it over this socket, and return
        the response read until EOF (converted via ``conv`` - a string by default).

        :param url: request path; ``host`` / ``method`` / ``user_agent`` fill the request headers
        :param extra_data: extra header lines; ``body`` is the request body (for POST etc.)
        :param eof_timeout: max total seconds spent reading the response (see :meth:`.read_eof`)
        """
        # BUGFIX: 'timeout_fail' must be *popped* out of kwargs. It was previously passed to
        # query() both explicitly (via kwargs.get) and again inside **kwargs, which raised
        # "got multiple values for keyword argument 'timeout_fail'" whenever a caller supplied it.
        timeout_fail = kwargs.pop('timeout_fail', False)
        async with self:
            data = self._http_request(
                url, host=host, method=method, user_agent=user_agent, extra=extra_data, body=body, sock=sock, **kwargs
            )
            return await self.query(
                data, bufsize, eof_timeout=eof_timeout, timeout_fail=timeout_fail, conv=conv, sock=sock, **kwargs
            )
async def accept(self, sock: OpAnySocket = None, **kwargs) -> Tuple[AnySocket, Tuple[str, int]]:
        """Await an incoming connection; returns ``(connection_socket, (host, port))``."""
        listener = sock if sock is not None else self.socket
        return await self.loop.sock_accept(listener)
async def handle_connection(
        self, sock: AnySocket, addr: Tuple[str, int], callback: Callable[["AsyncSocketWrapper", Tuple[str, int]], Any],
        stop_return: Union[str, bytes] = None,
        **kwargs
    ):
        """
        Handle one accepted server connection: wrap ``sock``, invoke ``callback`` and return its result.

        If ``stop_return`` is set and the (stringified) callback result matches it, raises
        :class:`StopLoopOnMatch` so the accept loop in :meth:`.on_connect` knows to stop.

        :keyword stop_compare: 'equal' (default) or one of 'in'/'contain'/'contains'/... for substring matching
        :keyword stop_compare_lower: lowercase both sides before comparing (default ``True``)
        """
        stop_compare, stop_compare_lower = kwargs.get('stop_compare', 'equal'), kwargs.get('stop_compare_lower', True)
        if stop_return is not None: stop_return = stringify(stop_return)
        log.info("[async] NEW CONNECTION: %s || %s", sock, addr)
        log.info("[async] Running callback: %s(%s, %s)\n", callback.__name__, sock, addr)
        # The callback may be sync or async; await_if_needed() handles both.
        orig_cres = await await_if_needed(callback(self.from_socket(sock), addr))
        cres = stringify(orig_cres)
        log.info("[async] Callback return data: %s\n\n", cres)
        if stop_return is not None:
            if stop_compare_lower: stop_return, cres = stop_return.lower(), cres.lower()
            if stop_compare.lower() in ['in', 'contain', 'contains', 'contained', 'within', 'inside']:
                if stop_return in cres or strip_null(stop_return) in strip_null(cres):
                    raise StopLoopOnMatch("[async] Matched stop_return. Parent should stop loop.", cres, stop_compare, stop_compare_lower)
            # NOTE: this equality check also runs in substring mode; harmless, since equality
            # implies containment and would already have raised above.
            if cres == stop_return or strip_null(cres) == strip_null(stop_return):
                raise StopLoopOnMatch("[async] Matched stop_return. Parent should stop loop.", cres, stop_compare, stop_compare_lower)
        return orig_cres
@_sockwrapper_auto_connect()
async def on_connect(
        self, callback: Callable[["AsyncSocketWrapper", Tuple[str, int]], Any], timeout: AnyNum = None,
        stop_return: Union[str, bytes] = None, sock: OpAnySocket = None, **kwargs
    ):
        """
        Accept-loop for server sockets: wait for incoming connections and run ``callback`` on each,
        binding/listening first if needed.

        The loop ends when the wrapper disconnects, or when :meth:`.handle_connection` raises
        :class:`StopLoopOnMatch` because the callback's (stringified) result matched ``stop_return``.

        NOTE(review): this async method is decorated with the *sync* ``@_sockwrapper_auto_connect()``
        while the other coroutines above use the async variant - confirm this is intentional.
        """
        if not self.server:
            raise ValueError("This AsyncSocketWrapper has 'server' set to False. Can't handle incoming connections.")
        if not self.binded: self.bind(sock=self.socket if sock is None else sock)
        if not self.listening: self.listen(self.listen_backlog, sock=self.socket if sock is None else sock)
        # NOTE(review): stop_return_match is never assigned anywhere - the loop only ever exits
        # via 'break' (on StopLoopOnMatch) or when self.connected becomes falsy.
        stop_return_match = None
        while self.connected and stop_return_match is None:
            log.info("[async] Waiting for incoming connection ( %s:%s || %s ) ...", self.host, self.port, self.socket.getsockname())
            sock, addr = await self.accept()
            try:
                with sock:
                    await self.handle_connection(sock, addr, callback, stop_return, **kwargs)
            except StopLoopOnMatch as e:
                log.info(" !!! Stopping on_connect as 'stop_return' has been matched: %s", stop_return)
                log.info(" !!! The matching message was: %s", e.match)
                break
        log.info(" !!! Disconnected. Stopping on_connect.")
async def __aenter__(self):
        """Delegate async context-manager entry to the wrapper's connection tracker."""
        return await self.tracker.__aenter__()
async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Delegate async context-manager exit to the wrapper's connection tracker."""
        return await self.tracker.__aexit__(exc_type, exc_val, exc_tb)
async def send_data_async(
        host: str, port: int, data: Union[bytes, str, Iterable], timeout: AnyNum = None, **kwargs
) -> Optional[Union[str, bytes]]:
    """
    Open a TCP connection to ``host:port``, transmit ``data``, then read and return the response until EOF.

    >>> await send_data_async('termbin.com', 9999, "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
    'https://termbin.com/oi07'

    :param host: Hostname or IPv4/IPv6 address to connect to
    :param port: TCP port to connect to on ``host``
    :param data: Payload - :class:`str` / :class:`bytes`, an iterable of byte chunks, or ``None`` to only read
    :param timeout: Socket timeout; defaults to :func:`socket.getdefaulttimeout`, falling back to ``15``
    :param kwargs: Same keyword options as the synchronous :func:`.send_data` (chunk, string_result,
                   strip_result, fail, ip_version)
    :return: The response read from the host (a :class:`str` unless ``string_result=False``), or ``None``
             on connection errors when ``fail=False``
    """
    fhost = f"({host}):{port}"
    # Tunable keyword options (mirrors the synchronous send_data())
    chunk_size = int(kwargs.get('chunk', kwargs.get('chunk_size', 64)))
    string_result = is_true(kwargs.get('string_result', True))
    strip_result = is_true(kwargs.get('strip_result', True))
    fail = is_true(kwargs.get('fail', True))
    ip_version = kwargs.get('ip_version', 'any')
    # empty_if(..., zero=True): fall back when timeout is None/0 -> global default -> 15
    timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 15, zero=True), zero=True)
    is_iter, data_iter = False, None
    if data is not None:
        # Scalars are byteified up-front; anything else is treated as an iterable of chunks
        # if iter() succeeds, otherwise byteified as a last resort.
        if isinstance(data, (str, bytes, int, float)):
            data = byteify(data)
        else:
            try:
                data_iter = iter(data)
                is_iter = True
            except TypeError:
                # noinspection PyTypeChecker
                data = byteify(data)
    loop = asyncio.get_event_loop()
    try:
        # Resolve first so we can pick AF_INET vs AF_INET6 for the socket.
        s_ver = socket.AF_INET
        ip = await resolve_ip_async(host, ip_version)
        if ip_is_v6(ip): s_ver = socket.AF_INET6
        fhost += f" (IP: {ip})"
        with socket.socket(s_ver, socket.SOCK_STREAM) as s:
            # NOTE(review): settimeout() on a socket driven via loop.sock_connect/sock_sendall is
            # unusual - asyncio expects non-blocking sockets; confirm this behaves as intended.
            s.settimeout(float(timeout))
            log.debug(" [...] Connecting to host: %s", fhost)
            await loop.sock_connect(s, (ip, port))
            log.debug(" [+++] Connected to %s\n", fhost)
            if data is None:
                log.debug(" [!!!] 'data' is None. Not transmitting any data to the host.")
            elif is_iter:
                i = 1
                for c in data_iter:
                    log.debug(" [...] Sending %s byte chunk (%s)\n", len(c), i)
                    await loop.sock_sendall(s, c)
            else:
                # We use 'sendall' to reliably send the entire contents of 'data' to the service we're connected to.
                log.debug(" [...] Sending %s bytes to %s ...\n", len(data), fhost)
                await loop.sock_sendall(s, data)
            log.debug(" >> Reading response ...")
            res = b''
            i = 1
            while True:
                chunk = await loop.sock_recv(s, chunk_size)
                if not chunk: break
                res += chunk
                log.debug(" [...] Read %s byte chunk (%s)\n", len(chunk), i)
                i += 1
            if string_result:
                res = stringify(res)
                # Strip interleaved null bytes / whitespace from both ends of the response
                if strip_result: res = res.strip("\x00").strip().strip("\x00").strip()
            log.debug(" [+++] Got result ( %s bytes ) \n", len(res))
    except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
        if fail:
            raise e
        log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
        return None
    return res
def send_data(
    host: str, port: int, data: Optional[Union[bytes, str, Iterable]] = None, timeout: Union[int, float] = None, **kwargs
) -> Optional[Union[str, bytes]]:
    """
    Open a TCP connection to ``host:port``, transmit ``data``, then read and return the response until EOF.

    >>> from privex.helpers import send_data
    >>> send_data('termbin.com', 9999, "hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
    'https://termbin.com/oi07'

    :param str host: The hostname or IPv4/v6 address to connect to
    :param port: The port number to connect to on ``host``
    :param bytes|str|iter data: The data to send to ``host:port`` via a TCP socket. Generally :class:`bytes` / :class:`str`.
             Can be an iterator/generator to send data in chunks. Can be ``None`` to disable sending data, instead
             only receiving and returning data.
    :param float|int timeout: Socket timeout. If not passed, uses the default from :func:`socket.getdefaulttimeout`.
             If the global default timeout is ``None``, then falls back to ``15``
    :param kwargs:
    :keyword int chunk: (Default: ``64``) Maximum number of bytes to read into buffer per socket receive call.
    :keyword bool string_result: (Default: ``True``) If ``True``, the response sent by the server will be casted into a :class:`str`
             before returning it.
    :keyword bool strip_result: (Default: ``True``) Only effective together with ``string_result`` - trims whitespace,
             newlines and null bytes from both ends of the response string.
    :keyword bool fail: (Default: ``True``) If ``True``, raise connection exceptions; if ``False``, return ``None`` instead.
    :keyword str|int ip_version: (Default: ``any``) Restrict resolution to a specific IP version ('v4' / 'v6' / 'any')
    :return: The response read from the host, or ``None`` on connection errors when ``fail=False``
    """
    fhost = f"({host}):{port}"
    chunk_size = int(kwargs.get('chunk', kwargs.get('chunk_size', 64)))
    string_result = is_true(kwargs.get('string_result', True))
    strip_result = is_true(kwargs.get('strip_result', True))
    fail = is_true(kwargs.get('fail', True))
    ip_version = kwargs.get('ip_version', 'any')
    # Fall back: explicit timeout -> global socket default -> 15 seconds
    timeout = empty_if(timeout, empty_if(socket.getdefaulttimeout(), 15, zero=True), zero=True)
    is_iter, data_iter, is_v6, v4_address, host_is_ip = False, None, False, None, False
    if data is not None:
        # Scalars are byteified up-front; anything else is treated as an iterable of chunks
        # if iter() succeeds, otherwise byteified as a last resort.
        if isinstance(data, (str, bytes, int, float)):
            data = byteify(data)
        else:
            try:
                data_iter = iter(data)
                is_iter = True
            except TypeError:
                # noinspection PyTypeChecker
                data = byteify(data)
    # Detect whether 'host' is already a literal IP - if so there is no point resolving
    # a separate IPv4 backup address below.
    try:
        ip_network(host)
        host_is_ip = True
    except (TypeError, ValueError):
        host_is_ip = False
    try:
        # First we resolve the IP address of 'host', so we can detect whether we're connecting to an IPv4 or IPv6 host,
        # letting us adjust the AF_INET variable accordingly.
        s_ver = socket.AF_INET
        ip = resolve_ip(host, ip_version)
        if ip_is_v6(ip):
            s_ver, is_v6 = socket.AF_INET6, True
            if not host_is_ip:
                try:
                    v4_address = resolve_ip(host, 'v4')
                except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror, AttributeError) as e:
                    # BUGFIX: the format string has three placeholders but only two args were
                    # passed, so this warning was silently mangled by the logging module.
                    log.warning(
                        "Warning: failed to resolve IPv4 address for %s (to be used as a backup if IPv6 is broken). Reason: %s %s ",
                        host, type(e), str(e)
                    )
        fhost += f" (IP: {ip})"
    except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
        if fail:
            raise e
        log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
        return None
    try:
        with socket.socket(s_ver, socket.SOCK_STREAM) as s:
            # Once we have our socket object, we set the timeout (by default it could hang forever), and open the connection.
            s.settimeout(timeout)
            log.debug(" [...] Connecting to host: %s", fhost)
            s.connect((ip, port))
            log.debug(" [+++] Connected to %s\n", fhost)
            if data is None:
                log.debug(" [!!!] 'data' is None. Not transmitting any data to the host.")
            elif is_iter:
                i = 1
                for c in data_iter:
                    log.debug(" [...] Sending %s byte chunk (%s)\n", len(c), i)
                    # BUGFIX: was 's.sock_sendall(c)' - plain sockets have no such method
                    # (that is the asyncio loop API), so iterable payloads always crashed
                    # with AttributeError. Also increment the chunk counter for the log line.
                    s.sendall(c)
                    i += 1
            else:
                # We use 'sendall' to reliably send the entire contents of 'data' to the service we're connected to.
                log.debug(" [...] Sending %s bytes to %s ...\n", len(data), fhost)
                s.sendall(data)
            # Once we've sent 'data', read the reply until the server closes the connection (EOF).
            log.debug(" >> Reading response ...")
            res = b''
            i = 1
            while True:
                chunk = s.recv(chunk_size)
                if not chunk: break
                res += chunk
                log.debug(" [...] Read %s byte chunk (%s)\n", len(chunk), i)
                i += 1
            if string_result:
                res = stringify(res)
                if strip_result: res = res.strip("\x00").strip().strip("\x00").strip()
            log.debug(" [+++] Got result ( %s bytes ) \n", len(res))
    except (socket.timeout, ConnectionRefusedError, ConnectionResetError, socket.gaierror) as e:
        log.warning("Exception while connecting + sending data to: %s - reason: %s %s", fhost, type(e), str(e))
        if is_v6 and not empty(v4_address):
            log.warning(
                "Retrying connection to %s over IPv4 instead of IPv6. || IPv6 address: %s || IPv4 address: %s ",
                fhost, ip, v4_address
            )
            # BUGFIX: force IPv4 on the retry. Re-resolving with the caller's original
            # ip_version ('any') would pick IPv6 again and recurse indefinitely while the
            # IPv6 path stays broken. With ip_version='v4' the retry cannot recurse again.
            # Caveat: if 'data' was a generator, it is already (partially) consumed here.
            retry_kwargs = dict(kwargs, ip_version='v4')
            return send_data(host, port, data, timeout=timeout, **retry_kwargs)
        if fail:
            raise e
        return None
    return res
def upload_termbin(data: Union[bytes, str], timeout: Union[int, float] = None, **kwargs) -> str:
    """
    Upload ``data`` to the `TermBin`_ pastebin service and return the paste URL (a raw view/download link).

    The host and port come from :attr:`privex.helpers.settings.TERMBIN_HOST` and
    :attr:`privex.helpers.settings.TERMBIN_PORT`. AsyncIO variant: :func:`.upload_termbin_async`.

    .. _TermBin: https://termbin.com

    >>> upload_termbin("hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
    'https://termbin.com/kerjk'

    :param bytes|str data: The paste contents, as :class:`str` or :class:`bytes`
    :param float|int timeout: Socket timeout; defaults to :func:`socket.getdefaulttimeout` or ``15``
    :return str url: The TermBin URL for the new paste
    """
    payload = byteify(data)
    log.info(" [...] Uploading %s bytes to termbin ...\n", len(payload))
    link = send_data(settings.TERMBIN_HOST, settings.TERMBIN_PORT, payload, timeout=timeout, **kwargs)
    log.info(" [+++] Got termbin link: %s \n", link)
    return link
def upload_termbin_file(filename: str, timeout: int = 15, **kwargs) -> str:
    """
    Read the file at ``filename`` and upload its contents to `TermBin`_, returning the paste URL.

    .. NOTE:: AsyncIO variant: :func:`.upload_termbin_file_async`. If your data is already in a
              variable, use :func:`.upload_termbin` directly instead.

    .. _TermBin: https://termbin.com

    :param str filename: Path (absolute or relative) of the file to upload
    :param float|int timeout: Socket timeout; defaults to ``15``
    :return str url: The TermBin URL for the new paste
    """
    log.info(" >> Uploading file '%s' to termbin", filename)
    with open(filename, 'rb') as handle:
        log.debug(" [...] Opened file %s - reading contents into RAM...", filename)
        contents = handle.read()
    log.debug(" [+++] Loaded file into RAM. Total size: %s bytes", len(contents))
    res = upload_termbin(contents, timeout=timeout, **kwargs)
    log.info(" [+++] Uploaded file %s to termbin. Got termbin link: %s \n", filename, res)
    return res
async def upload_termbin_async(data: Union[bytes, str], timeout: Union[int, float] = None) -> str:
    """
    Upload ``data`` to the `TermBin`_ pastebin service and return the paste URL (a raw view/download link).

    The host and port come from :attr:`privex.helpers.settings.TERMBIN_HOST` and
    :attr:`privex.helpers.settings.TERMBIN_PORT`. Synchronous variant: :func:`.upload_termbin`.

    .. _TermBin: https://termbin.com

    >>> await upload_termbin_async("hello world\\nthis is a test\\n\\nlorem ipsum dolor\\n")
    'https://termbin.com/kerjk'

    :param bytes|str data: The paste contents, as :class:`str` or :class:`bytes`
    :param float|int timeout: Socket timeout; defaults to :func:`socket.getdefaulttimeout` or ``15``
    :return str url: The TermBin URL for the new paste
    """
    payload = byteify(data)
    log.info(" [...] Uploading %s bytes to termbin ...\n", len(payload))
    link = await send_data_async(settings.TERMBIN_HOST, settings.TERMBIN_PORT, payload, timeout=timeout)
    log.info(" [+++] Got termbin link: %s \n", link)
    return link
async def upload_termbin_file_async(filename: str, timeout: int = 15) -> str:
    """
    Read the file at ``filename`` and upload its contents to `TermBin`_, returning the paste URL.

    .. NOTE:: Synchronous variant: :func:`.upload_termbin_file`. If your data is already in a
              variable, use :func:`.upload_termbin_async` directly instead.

    .. _TermBin: https://termbin.com

    :param str filename: Path (absolute or relative) of the file to upload
    :param float|int timeout: Socket timeout; defaults to ``15``
    :return str url: The TermBin URL for the new paste
    """
    log.info(" >> Uploading file '%s' to termbin", filename)
    with open(filename, 'rb') as handle:
        log.debug(" [...] Opened file %s - reading contents into RAM...", filename)
        contents = handle.read()
    log.debug(" [+++] Loaded file into RAM. Total size: %s bytes", len(contents))
    res = await upload_termbin_async(contents, timeout=timeout)
    log.info(" [+++] Uploaded file %s to termbin. Got termbin link: %s \n", filename, res)
    return res
| 54,535 | 246 | 3,335 |
f0a3f804aa451fdb3b34d8b44bce2b427e244b53 | 324 | py | Python | ch5/fibonacci.first.py | ldmcdaniel/learning_python | 63717c397cd75e45a8aef909d4b601466cd6036a | [
"MIT"
] | 30 | 2016-10-28T18:14:15.000Z | 2021-08-29T15:20:56.000Z | ch5/fibonacci.first.py | ldmcdaniel/learning_python | 63717c397cd75e45a8aef909d4b601466cd6036a | [
"MIT"
] | null | null | null | ch5/fibonacci.first.py | ldmcdaniel/learning_python | 63717c397cd75e45a8aef909d4b601466cd6036a | [
"MIT"
def fibonacci(N):
    """Return all Fibonacci numbers from 0 up to (and including) N.

    :param N: inclusive upper bound for the generated values
    :return: list of Fibonacci numbers <= N, starting with 0
    """
    # NOTE: the original 'def' line was fused with dataset-join residue; reconstructed here.
    seq = [0]
    a, b = 0, 1
    while b <= N:
        seq.append(b)
        a, b = b, a + b  # standard two-variable Fibonacci step
    return seq
print(fibonacci(0)) # [0]
print(fibonacci(1)) # [0, 1, 1]
print(fibonacci(50)) # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
| 23.142857 | 57 | 0.549383 | def fibonacci(N):
    """Return all fibonacci numbers up to N. """
    # NOTE(review): the 'def' line above is fused with dataset-join residue ("| 23.14... |");
    # the clean source line is just 'def fibonacci(N):'.
    result = [0]
    next_n = 1
    while next_n <= N:
        result.append(next_n)
        # Fibonacci recurrence: the next value is the sum of the last two collected values
        next_n = sum(result[-2:])
    return result
print(fibonacci(0)) # [0]
print(fibonacci(1)) # [0, 1, 1]
print(fibonacci(50)) # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
| 0 | 0 | 0 |
09743b8d5ae4a032a89d80ec7947cf104ae4dd31 | 5,751 | py | Python | robot_ws/src/cloudwatch_robot/launch/monitoring.launch.py | husarion/aws-robomaker-sample-application-cloudwatch | 9a381009aca558e65c03077965a349fb93c59d99 | [
"MIT-0"
] | null | null | null | robot_ws/src/cloudwatch_robot/launch/monitoring.launch.py | husarion/aws-robomaker-sample-application-cloudwatch | 9a381009aca558e65c03077965a349fb93c59d99 | [
"MIT-0"
] | null | null | null | robot_ws/src/cloudwatch_robot/launch/monitoring.launch.py | husarion/aws-robomaker-sample-application-cloudwatch | 9a381009aca558e65c03077965a349fb93c59d99 | [
"MIT-0"
] | 1 | 2022-03-04T10:36:17.000Z | 2022-03-04T10:36:17.000Z | import os
import sys
import yaml
import launch
import launch_ros.actions
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch.substitutions import PythonExpression
from ament_index_python.packages import get_package_share_directory
if __name__ == '__main__':
    # NOTE(review): generate_launch_description() is not defined in this truncated copy of the
    # launch file; its return value is also discarded (ros2 launch normally calls it itself).
    generate_launch_description()
| 47.139344 | 141 | 0.677621 | import os
import sys
import yaml
import launch
import launch_ros.actions
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch.substitutions import PythonExpression
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
    """Build the ROS2 LaunchDescription for robot-side CloudWatch monitoring.

    Reads default AWS region / metrics namespace / log group from the packaged
    cloudwatch_robot config YAMLs, declares override launch arguments, starts the
    three monitor nodes, and includes the metric-collector and logger launch files.
    """
    # Defaults come from the config files shipped with the 'cloudwatch_robot' package.
    default_metrics_config = os.path.join(get_package_share_directory('cloudwatch_robot'), 'config', 'cloudwatch_metrics_config.yaml')
    default_logs_config = os.path.join(get_package_share_directory('cloudwatch_robot'), 'config', 'cloudwatch_logs_config.yaml')
    with open(default_metrics_config, 'r') as f:
        config_text = f.read()
    config_yaml = yaml.safe_load(config_text)
    default_aws_metrics_namespace = config_yaml['cloudwatch_metrics_collector']['ros__parameters']['aws_metrics_namespace']
    default_aws_region = config_yaml['cloudwatch_metrics_collector']['ros__parameters']['aws_client_configuration']['region']
    with open(default_logs_config, 'r') as f:
        config_text = f.read()
    config_yaml = yaml.safe_load(config_text)
    default_log_group_name = config_yaml['cloudwatch_logger']['ros__parameters']['log_group_name']
    # Environment variable ROS_AWS_REGION overrides the region from the YAML config.
    default_aws_region = os.environ.get('ROS_AWS_REGION', default_aws_region)
    launch_actions = [
        launch.actions.DeclareLaunchArgument(
            name='aws_region',
            description='AWS region override, defaults to config .yaml if unset',
            default_value=default_aws_region
        ),
        launch.actions.DeclareLaunchArgument(
            name='launch_id',
            description='Used for resource name suffix if specified',
            default_value=launch.substitutions.EnvironmentVariable('LAUNCH_ID')
        ),
        launch.actions.DeclareLaunchArgument(
            name='metrics_node_name',
            default_value="cloudwatch_metrics_collector"
        ),
        launch.actions.DeclareLaunchArgument(
            name='aws_metrics_namespace',
            default_value=default_aws_metrics_namespace
        ),
        launch.actions.DeclareLaunchArgument(
            name='logger_node_name',
            default_value='cloudwatch_logger'
        ),
        launch.actions.DeclareLaunchArgument(
            name='log_group_name',
            default_value=default_log_group_name
        ),
        # The three monitor nodes publish speed / obstacle-distance / goal-distance metrics.
        launch_ros.actions.Node(
            package='cloudwatch_robot',
            node_executable='monitor_speed',
            node_name='monitor_speed',
            output='log'
        ),
        launch_ros.actions.Node(
            package='cloudwatch_robot',
            node_executable='monitor_obstacle_distance',
            node_name='monitor_obstacle_distance',
            output='log'
        ),
        launch_ros.actions.Node(
            package='cloudwatch_robot',
            node_executable='monitor_distance_to_goal',
            node_name='monitor_distance_to_goal',
            output='log'
        ),
        # If a launch_id was provided, suffix the metrics namespace and log group with it
        # so concurrent simulation runs don't write into the same CloudWatch resources.
        launch.actions.SetLaunchConfiguration(
            name='aws_metrics_namespace',
            value=PythonExpression(["'", LaunchConfiguration('aws_metrics_namespace'), "-", LaunchConfiguration('launch_id'), "'"]),
            condition=IfCondition(PythonExpression(["'true' if '", LaunchConfiguration('launch_id'), "' else 'false'"]))
        ),
        launch.actions.SetLaunchConfiguration(
            name='log_group_name',
            value=PythonExpression(["'", LaunchConfiguration('log_group_name'), "-", LaunchConfiguration('launch_id'), "'"]),
            condition=IfCondition(PythonExpression(["'true' if '", LaunchConfiguration('launch_id'), "' else 'false'"]))
        ),
        # Pull in the collector/logger launch files from their own packages, passing our config.
        launch.actions.IncludeLaunchDescription(
            launch.launch_description_sources.PythonLaunchDescriptionSource(
                os.path.join(get_package_share_directory('health_metric_collector'), 'launch', 'health_metric_collector.launch.py')
            ),
            launch_arguments={
                'config_file': os.path.join(get_package_share_directory('cloudwatch_robot'), 'config', 'health_metrics_config.yaml')
            }.items()
        ),
        launch.actions.IncludeLaunchDescription(
            launch.launch_description_sources.PythonLaunchDescriptionSource(
                os.path.join(get_package_share_directory('cloudwatch_metrics_collector'), 'launch', 'cloudwatch_metrics_collector.launch.py')
            ),
            launch_arguments={
                'node_name': launch.substitutions.LaunchConfiguration('metrics_node_name'),
                'config_file': os.path.join(get_package_share_directory('cloudwatch_robot'), 'config', 'cloudwatch_metrics_config.yaml'),
                'aws_region': launch.substitutions.LaunchConfiguration('aws_region'),
                'aws_metrics_namespace': launch.substitutions.LaunchConfiguration('aws_metrics_namespace'),
            }.items()
        ),
        launch.actions.IncludeLaunchDescription(
            launch.launch_description_sources.PythonLaunchDescriptionSource(
                os.path.join(get_package_share_directory('cloudwatch_logger'), 'launch', 'cloudwatch_logger.launch.py')
            ),
            launch_arguments={
                'node_name': launch.substitutions.LaunchConfiguration('logger_node_name'),
                'config_file': os.path.join(get_package_share_directory('cloudwatch_robot'), 'config', 'cloudwatch_logs_config.yaml'),
                'aws_region': launch.substitutions.LaunchConfiguration('aws_region'),
                'log_group_name': launch.substitutions.LaunchConfiguration('log_group_name'),
            }.items()
        ),
    ]
    ld = launch.LaunchDescription(launch_actions)
    return ld
if __name__ == '__main__':
    # Manual invocation builds the LaunchDescription but discards it; the ROS2 launch
    # system normally imports this module and calls generate_launch_description() itself.
    generate_launch_description()
| 5,377 | 0 | 23 |
67e5af01030aade7fd237d0de541c611aee0fcd3 | 1,165 | py | Python | lambdata_jpalex/__init__.py | extrajp2014/lambdata | bbe126516533ebe334c2b9f3bdc8dfa0bd1755be | [
"MIT"
] | null | null | null | lambdata_jpalex/__init__.py | extrajp2014/lambdata | bbe126516533ebe334c2b9f3bdc8dfa0bd1755be | [
"MIT"
] | null | null | null | lambdata_jpalex/__init__.py | extrajp2014/lambdata | bbe126516533ebe334c2b9f3bdc8dfa0bd1755be | [
"MIT"
] | null | null | null | import numpy as np
class Statistic:
    """
    Contains statistic functions helper (mean, sample variance, sample standard deviation).
    """
    def __init__(self, numbers=None, confidence=0.95):
        """
        numbers = sequence of numbers (defaults to [1, 2])
        confidence = confidence interval, default is 95%
        """
        # BUGFIX: the default used to be the mutable literal [1, 2], which is created once
        # and shared by every instance constructed without 'numbers'. Build it per-instance.
        self.numbers = [1, 2] if numbers is None else numbers
        self.confidence = confidence
    def mean(self):
        """
        Calculate the mean of dataset
        """
        return sum(self.numbers) / len(self.numbers)
    def variance(self):
        """
        Calculate the variances of sample dataset (n - 1)
        """
        mu = self.mean()  # reuse mean() instead of recomputing it inline
        differences = [(x - mu) ** 2 for x in self.numbers]
        return sum(differences) / (len(differences) - 1)
    def stdev(self):
        """
        Calculate the standard deviation of sample dataset (n - 1)
        """
        # The sample standard deviation is simply the square root of the sample variance.
        return np.sqrt(self.variance())
# Demo/smoke test: sample standard deviation of the default dataset [1, 2] (~0.7071)
test = Statistic().stdev()
print(test)
| 26.477273 | 66 | 0.571674 | import numpy as np
class Statistic:
    """
    Contains statistic functions helper
    """
    def __init__(self, numbers = [1,2], confidence=0.95):
        """
        numbers = array of numbers
        confidence = confidence interval, default is 95%
        """
        # NOTE(review): mutable default argument - the [1, 2] list is created once and
        # shared by every instance constructed without 'numbers'.
        self.numbers = numbers
        self.confidence = confidence
    def mean(self):
        """
        Calculate the mean of dataset
        """
        mean = sum(self.numbers)/len(self.numbers)
        return mean
    def variance(self):
        """
        Calculate the variances of sample dataset (n - 1)
        """
        # Recomputes the mean inline rather than calling self.mean()
        mean = sum(self.numbers) / len(self.numbers)
        differences = [(x-mean)**2 for x in self.numbers]
        # Bessel's correction: divide by n - 1 for a sample (not population) variance
        variance = sum(differences)/(len(differences)-1)
        return variance
    def stdev(self):
        """
        Calculate the standard deviation of sample dataset (n - 1)
        """
        # Recomputes the full variance inline rather than calling self.variance()
        mean = sum(self.numbers) / len(self.numbers)
        differences = [(x-mean)**2 for x in self.numbers]
        variance = sum(differences)/(len(differences)-1)
        # Standard deviation is the square root of the sample variance
        stdev = np.sqrt(variance)
        return stdev
# Demo/smoke test: sample standard deviation of the default dataset [1, 2] (~0.7071)
test = Statistic().stdev()
print(test)
| 0 | 0 | 0 |
4fd062e0af2520762bf197fa4024a682543bf356 | 9,754 | py | Python | backend/celery/api.py | OriHoch/wikimedia-crosswatch | 05b009cfd9e17eb0e252d37c8a22a93cade5c6a4 | [
"ISC"
] | null | null | null | backend/celery/api.py | OriHoch/wikimedia-crosswatch | 05b009cfd9e17eb0e252d37c8a22a93cade5c6a4 | [
"ISC"
] | null | null | null | backend/celery/api.py | OriHoch/wikimedia-crosswatch | 05b009cfd9e17eb0e252d37c8a22a93cade5c6a4 | [
"ISC"
] | null | null | null | # -*- coding: utf-8 -*-
# ISC License
# Copyright (C) 2015 Jan Lebert
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
import requests
from requests_oauthlib import OAuth1
from redis import StrictRedis
import json
from datetime import datetime, timedelta
from mw.api import Session as RevertsSession
from mw.lib.reverts import api as reverts
from .. import config
from . import logger
| 34.224561 | 78 | 0.554849 | # -*- coding: utf-8 -*-
# ISC License
# Copyright (C) 2015 Jan Lebert
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
import requests
from requests_oauthlib import OAuth1
from redis import StrictRedis
import json
from datetime import datetime, timedelta
from mw.api import Session as RevertsSession
from mw.lib.reverts import api as reverts
from .. import config
from . import logger
class MediaWiki(object):
    def __init__(self, host="https://en.wikipedia.org", path="/w/api.php",
                 access_token=None, redis_channel=None):
        """
        :param host: base URL of the wiki (scheme + domain, no trailing slash)
        :param path: API entry-point path, appended to ``host``
        :param access_token: JSON string containing OAuth 'key' and 'secret'; ``None`` for anonymous requests
        :param redis_channel: redis pub/sub channel used by :meth:`.publish`; ``None`` disables publishing
        """
        self.api_url = host + path
        # Identify ourselves with contact info, per the Wikimedia User-Agent policy
        self.user_agent = \
            "crosswatch (https://tools.wmflabs.org/crosswatch;" +\
            "crosswatch@tools.wmflabs.org) python-requests/" +\
            requests.__version__
        self.headers = {'User-Agent': self.user_agent}
        if access_token:
            # Construct an auth object with the consumer and access tokens
            access_token = json.loads(access_token)
            self.auth = OAuth1(config.consumer_token.key,
                               client_secret=config.consumer_token.secret,
                               resource_owner_key=access_token['key'],
                               resource_owner_secret=access_token['secret'])
        else:
            self.auth = None
        self.redis_channel = redis_channel
        self.redis = StrictRedis(
            host=config.redis_server,
            port=config.redis_port,
            db=config.redis_db,
            decode_responses=True
        )
        # Endpoint of the ORES scoring service (machine-learned edit quality predictions)
        self.ores_url = 'http://ores.wmflabs.org/scores'
def publish(self, message):
if not self.redis_channel:
raise Exception("No redis channel set to publish to")
self.redis.publish(self.redis_channel, json.dumps(message))
@staticmethod
def timestamp(daysdelta=0):
"""
:param daysdelta: calculate timestamp in ´daysdelta´ days
:return: MediaWIki timestamp format
"""
now = datetime.utcnow()
delta = timedelta(days=daysdelta)
time = now + delta
return time.strftime("%Y%m%d%H%M%S")
def _handle_response(self, response):
if 'error' in response:
logger.error(response['error'])
if self.redis_channel:
if response['error']['code'] == \
'mwoauth-invalid-authorization':
self.publish({'msgtype': 'loginerror',
'errorinfo': response['error']['info']})
else:
self.publish({'msgtype': 'apierror',
'errorcode': response['error']['code'],
'errorinfo': response['error']['info']})
raise Exception(response['error']['code'], str(response))
if 'warnings' in response:
logger.warn("API-request warning: " + str(response['warnings']))
def query(self, params):
params['format'] = "json"
response = requests.get(self.api_url, params=params, auth=self.auth,
headers=self.headers)
response.raise_for_status()
response = response.json()
self._handle_response(response)
return response
def query_gen(self, params):
params['action'] = "query"
last_continue = {'continue': ""}
while True:
p = params.copy()
p.update(last_continue)
response = self.query(p)
if 'query' in response:
yield response['query']
if 'continue' not in response:
break
last_continue = response['continue']
def post(self, params, payload, token_type='csrf'):
params['format'] = "json"
token = self.get_token(token_type)
payload['token'] = token
response = requests.post(self.api_url, params=params, data=payload,
auth=self.auth, headers=self.headers)
self._handle_response(json.loads(response.text))
def get_token(self, type='csrf'):
params = {'action': "query",
'meta': "tokens",
'type': type}
r = self.query(params)
token = r['query']['tokens'][type + 'token']
return token
def user_info(self):
params = {
'action': "query",
'meta': "userinfo",
'uiprop': "rights"
}
response = self.query(params)
response = response['query']['userinfo']
User = namedtuple('user', ['name', 'rights'])
user = User(response['name'], response['rights'])
return user
def user_rights(self, username):
"""
User rights for a given username
:param username:
:return: list of rights
"""
params = {
'action': "query",
'list': "users",
'usprop': "rights",
'ususers': username
}
response = self.query(params)
return response['query']['users'][0]['rights']
def diff(self, pageid, old_revid, new_revid, uselang=""):
params = {
'action': "query",
'prop': "revisions",
'rvstartid': old_revid,
'rvendid': old_revid,
'rvdiffto': new_revid,
'pageids': pageid,
'uselang': uselang,
'formatversion': 2
}
response = self.query(params)
diff = response['query']['pages'][0]['revisions'][0]['diff']['body']
return diff
def wikis(self, use_cache=True):
key = config.redis_prefix + 'cached_wikis'
wikis = self.redis.get(key)
if use_cache and wikis:
wikis = json.loads(wikis)
else:
# Cache miss, do api request and fill cache
wikis = self._get_wikis()
self.redis.setex(key, 172800, json.dumps(wikis)) # 2 days exp.
return wikis
def _get_wikis(self):
params = {'action': "sitematrix"}
data = self.query(params)
blacklist_wikis = ['loginwiki']
flaggedrevs_wikis = self._flaggedrevs_wikis()
wikis = {}
for key, val in data['sitematrix'].items():
if key == 'count':
continue
if 'code' in val:
for site in val['site']:
wikis[site['dbname']] = self._create_wiki(
site, val['code'], val['name'],
flaggedrevs_wikis, blacklist_wikis)
else:
for site in val:
wikis[site['dbname']] = self._create_wiki(
site, '', '', flaggedrevs_wikis, blacklist_wikis)
return wikis
def _create_wiki(self, site, langcode, langname, flaggedrevs_wikis,
blacklist_wikis):
wiki = {
'lang': langcode,
'langname': langname,
'url': site['url'].replace("http://", "https://"),
'dbname': site['dbname'],
'group': site['code']
}
if wiki['group'] == 'wiki':
wiki['group'] = 'wikipedia'
inactive_codes = ['closed', 'private', 'fishbowl']
if any([key in site for key in inactive_codes]):
wiki['closed'] = True
if site['dbname'] in blacklist_wikis:
wiki['closed'] = True
if site['dbname'] in flaggedrevs_wikis:
wiki['flaggedrevs'] = True
return wiki
def _flaggedrevs_wikis(self):
url = "https://noc.wikimedia.org/conf/dblists/flaggedrevs.dblist"
response = requests.get(url, headers=self.headers)
response.raise_for_status()
return response.text.splitlines()
def ores_context_exists(self, dbname, model='reverted', cached=True):
"""Checks if ORES context for a wiki exists"""
key = config.redis_prefix + 'ores' + model
if not self.redis.exists(key) or not cached:
self._ores_contexts(model)
return self.redis.sismember(key, dbname)
def _ores_contexts(self, model):
"""Fill cache"""
pipe = self.redis.pipeline()
key = config.redis_prefix + 'ores' + model
pipe.delete(key)
contexts = requests.get(self.ores_url, headers=self.headers)
contexts.raise_for_status()
contexts = contexts.json()['contexts']
for context in contexts:
models = requests.get('{}/{}/'.format(self.ores_url, context),
headers=self.headers)
models.raise_for_status()
models = models.json()['models']
if model in models:
pipe.sadd(key, context)
pipe.expire(key, 172800) # 2 days exp.
pipe.execute()
def ores_scores(self, dbname, revids, model='reverted'):
"""Get ORES scores for revision ids"""
url = '{}/{}/{}/'.format(self.ores_url, dbname, model)
revids = '|'.join([str(id) for id in revids])
params = {'revids': revids}
response = requests.get(url, params=params, headers=self.headers)
if response.status_code != requests.codes.ok:
raise Exception('ORES error code {} for {} with params {}'.format(
response.status_code, dbname, str(params)), response.text)
return response.json()
def was_reverted(self, revision):
"""
Checks if a revision was reverted
:param revision: a revision dict containing ‘revid’ and ‘pageid’
"""
session = RevertsSession(self.api_url, user_agent=self.user_agent)
return reverts.check_rev(session, revision)
| 6,260 | 3,027 | 23 |