repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
gregmbi/polyaxon | core/polyaxon/stores/base_store.py | <filename>core/polyaxon/stores/base_store.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
from typing import Callable, List, Optional, Tuple
class StoreMixin:
    """Abstract interface for artifact stores, with thread-pool helpers.

    Subclasses implement the storage operations (``ls``, ``delete``,
    ``download_*``, ``upload_*``); the ``*_pool`` helpers let callers fan
    those operations out over an optional ``ThreadPoolExecutor``.
    """

    def ls(self, path):
        raise NotImplementedError

    def list(self, *args, **kwargs):
        raise NotImplementedError

    def delete(self, *args, **kwargs):
        raise NotImplementedError

    def download_file(self, *args, **kwargs):
        raise NotImplementedError

    def download_dir(self, *args, **kwargs):
        raise NotImplementedError

    def upload_file(self, *args, **kwargs):
        raise NotImplementedError

    def upload_dir(self, *args, **kwargs):
        raise NotImplementedError

    def init_pool(self, workers: int = 0) -> Tuple[futures.ThreadPoolExecutor, List]:
        """Return a ``(pool, future_results)`` pair.

        ``pool`` is a fresh ``ThreadPoolExecutor`` when ``workers`` is
        truthy, otherwise ``None``; ``future_results`` starts empty.
        """
        executor = futures.ThreadPoolExecutor(workers) if workers else None
        return executor, []

    def submit_pool(
        self,
        fn: Callable,
        workers: int,
        pool: Optional[futures.ThreadPoolExecutor],
        future_results: Optional[List],
        **kwargs
    ) -> Optional[List]:
        """Run ``fn(**kwargs)`` on ``pool`` when ``workers`` is set, else inline.

        When submitted asynchronously, the resulting future is appended to
        ``future_results``. The (possibly unchanged) ``future_results`` is
        returned so callers can chain submissions.
        """
        if not workers:
            fn(**kwargs)
        else:
            future_results.append(pool.submit(fn, **kwargs))
        return future_results

    def close_pool(self, pool: Optional[futures.ThreadPoolExecutor]):
        """Shut down ``pool`` (if any), blocking until pending work finishes."""
        if pool is not None:
            pool.shutdown(wait=True)
|
gregmbi/polyaxon | core/polyaxon/tracking/__init__.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from polyaxon.polyboard.artifacts import V1RunArtifact
from polyaxon.tracking.run import Run
TRACKING_RUN = None
def init(
    owner=None,
    project=None,
    run_uuid=None,
    client=None,
    track_code=True,
    track_env=False,
):
    """Create the module-level ``Run`` used by all tracking helpers.

    Must be called before any other function in this module; the helpers
    forward directly to ``TRACKING_RUN`` and will fail while it is ``None``.
    """
    global TRACKING_RUN
    TRACKING_RUN = Run(
        owner=owner,
        project=project,
        run_uuid=run_uuid,
        client=client,
        track_code=track_code,
        track_env=track_env,
    )
def log_data_ref(name, value):
    """Proxy to ``Run.log_data_ref`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_data_ref(
        name=name, value=value,
    )
def log_metric(name, value, step=None, timestamp=None):
    """Proxy to ``Run.log_metric`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_metric(
        name=name, value=value, step=step, timestamp=timestamp,
    )
def log_metrics(step=None, timestamp=None, **metrics):
    """Proxy to ``Run.log_metrics`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_metrics(step=step, timestamp=timestamp, **metrics)
def log_image(data, name=None, step=None, timestamp=None, rescale=1, dataformats="CHW"):
    """Proxy to ``Run.log_image`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_image(
        data=data,
        name=name,
        step=step,
        timestamp=timestamp,
        rescale=rescale,
        dataformats=dataformats,
    )
def log_image_with_boxes(
    tensor_image,
    tensor_boxes,
    name=None,
    step=None,
    timestamp=None,
    rescale=1,
    dataformats="CHW",
):
    """Proxy to ``Run.log_image_with_boxes`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_image_with_boxes(
        tensor_image=tensor_image,
        tensor_boxes=tensor_boxes,
        name=name,
        step=step,
        timestamp=timestamp,
        rescale=rescale,
        dataformats=dataformats,
    )
def log_mpl_image(data, name=None, close=True, step=None, timestamp=None):
    """Proxy to ``Run.log_mpl_image`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_mpl_image(
        data=data, name=name, close=close, step=step, timestamp=timestamp,
    )
def log_video(data, name=None, fps=4, step=None, timestamp=None, content_type=None):
    """Proxy to ``Run.log_video`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_video(
        data=data,
        name=name,
        fps=fps,
        step=step,
        timestamp=timestamp,
        content_type=content_type,
    )
def log_audio(
    data, name=None, sample_rate=44100, step=None, timestamp=None, content_type=None,
):
    """Proxy to ``Run.log_audio`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_audio(
        data=data,
        name=name,
        sample_rate=sample_rate,
        step=step,
        timestamp=timestamp,
        content_type=content_type,
    )
def log_text(name, text, step=None, timestamp=None):
    """Proxy to ``Run.log_text`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_text(
        name=name, text=text, step=step, timestamp=timestamp,
    )
def log_html(name, html, step=None, timestamp=None):
    """Proxy to ``Run.log_html`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_html(
        name=name, html=html, step=step, timestamp=timestamp,
    )
def log_np_histogram(name, values, counts, step=None, timestamp=None):
    """Proxy to ``Run.log_np_histogram`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_np_histogram(
        name=name, values=values, counts=counts, step=step, timestamp=timestamp,
    )
def log_histogram(name, values, bins, max_bins=None, step=None, timestamp=None):
    """Proxy to ``Run.log_histogram`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_histogram(
        name=name,
        values=values,
        bins=bins,
        max_bins=max_bins,
        step=step,
        timestamp=timestamp,
    )
def log_model(path, name=None, framework=None, spec=None, step=None, timestamp=None):
    """Proxy to ``Run.log_model`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_model(
        path=path,
        name=name,
        framework=framework,
        spec=spec,
        step=step,
        timestamp=timestamp,
    )
def log_dataframe(path, name=None, content_type=None, step=None, timestamp=None):
    """Proxy to ``Run.log_dataframe`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_dataframe(
        path=path, name=name, content_type=content_type, step=step, timestamp=timestamp,
    )
def log_artifact(path, name=None, artifact_kind=None, step=None, timestamp=None):
    """Proxy to ``Run.log_artifact`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_artifact(
        path=path,
        name=name,
        artifact_kind=artifact_kind,
        step=step,
        timestamp=timestamp,
    )
def log_plotly_chart(name, figure, step=None, timestamp=None):
    """Proxy to ``Run.log_plotly_chart`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_plotly_chart(
        name=name, figure=figure, step=step, timestamp=timestamp,
    )
def log_bokeh_chart(name, figure, step=None, timestamp=None):
    """Proxy to ``Run.log_bokeh_chart`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_bokeh_chart(
        name=name, figure=figure, step=step, timestamp=timestamp,
    )
def log_mpl_plotly_chart(name, figure, step=None, timestamp=None):
    """Proxy to ``Run.log_mpl_plotly_chart`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_mpl_plotly_chart(
        name=name, figure=figure, step=step, timestamp=timestamp,
    )
def set_description(description):
    """Proxy to ``Run.set_description`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.set_description(description=description)
def set_name(name):
    """Proxy to ``Run.set_name`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.set_name(name=name)
def log_status(status, reason=None, message=None):
    """Proxy to ``Run.log_status`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_status(
        status=status, reason=reason, message=message,
    )
def log_inputs(reset=False, **inputs):
    """Proxy to ``Run.log_inputs`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_inputs(reset=reset, **inputs)
def log_outputs(reset=False, **outputs):
    """Proxy to ``Run.log_outputs`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_outputs(reset=reset, **outputs)
def log_tags():
    """Proxy to ``Run.log_tags`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_tags()
def log_succeeded():
    """Proxy to ``Run.log_succeeded`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_succeeded()
def log_stopped():
    """Proxy to ``Run.log_stopped`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_stopped()
def log_failed(message=None, traceback=None):
    """Proxy to ``Run.log_failed`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_failed(message=message, traceback=traceback)
def log_code_ref():
    """Proxy to ``Run.log_code_ref`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_code_ref()
def log_artifact_lineage(body: List[V1RunArtifact]):
    """Proxy to ``Run.log_artifact_lineage`` on the module-level run (requires ``init()``)."""
    global TRACKING_RUN
    TRACKING_RUN.log_artifact_lineage(body)
|
gregmbi/polyaxon | core/tests/test_polypod/test_contexts.py | <filename>core/tests/test_polypod/test_contexts.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tests.utils import BaseTestCase
from polyaxon.connections.kinds import V1ConnectionKind
from polyaxon.connections.schemas import V1ClaimConnection
from polyaxon.polyaxonfile.specs import kinds
from polyaxon.polyflow import V1CompiledOperation, V1RunKind
from polyaxon.polypod.contexts import resolve_contexts
from polyaxon.schemas.types import V1ConnectionType
@pytest.mark.polypod_mark
class TestResolveContexts(BaseTestCase):
    """Tests for ``resolve_contexts``: globals/init/connections payloads."""

    def test_resolver_default_contexts(self):
        # No artifacts store and all collection plugins disabled ->
        # only the "globals" section is populated, no artifacts_path.
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {"kind": V1RunKind.JOB, "container": {"image": "test"},},
            }
        )
        spec = resolve_contexts(
            namespace="test",
            owner_name="user",
            project_name="project",
            project_uuid="uuid",
            run_uuid="uuid",
            run_name="run",
            run_path="test",
            compiled_operation=compiled_operation,
            artifacts_store=None,
            connection_by_names={},
            iteration=None,
        )
        assert spec == {
            "globals": {
                "owner_name": "user",
                "project_unique_name": "user.project",
                "project_name": "project",
                "project_uuid": "uuid",
                "run_info": "user.project.runs.uuid",
                "name": "run",
                "uuid": "uuid",
                "namespace": "test",
                "iteration": None,
            },
            "init": {},
            "connections": {},
        }

    def test_resolver_init_and_connections_contexts(self):
        # With an artifacts store but collectArtifacts disabled, the
        # artifacts_path comes from the store mount path; init/connections
        # sections carry the connection schemas.
        store = V1ConnectionType(
            name="test_claim",
            kind=V1ConnectionKind.VOLUME_CLAIM,
            schema=V1ClaimConnection(
                mount_path="/claim/path", volume_claim="claim", read_only=True
            ),
        )
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": [store.name],
                    "init": [{"connection": store.name}],
                },
            }
        )
        spec = resolve_contexts(
            namespace="test",
            owner_name="user",
            project_name="project",
            project_uuid="uuid",
            run_uuid="uuid",
            run_name="run",
            run_path="test",
            compiled_operation=compiled_operation,
            artifacts_store=store,
            connection_by_names={store.name: store},
            iteration=12,
        )
        assert spec == {
            "globals": {
                "owner_name": "user",
                "project_unique_name": "user.project",
                "project_name": "project",
                "project_uuid": "uuid",
                "name": "run",
                "uuid": "uuid",
                "artifacts_path": "/claim/path/test",
                "namespace": "test",
                "iteration": 12,
                "run_info": "user.project.runs.uuid",
            },
            "init": {"test_claim": store.schema.to_dict()},
            "connections": {"test_claim": store.schema.to_dict()},
        }

    def test_resolver_outputs_collections(self):
        # collectArtifacts enabled -> artifacts_path points at the context
        # mount (/plx-context/artifacts), overriding the store mount path.
        store = V1ConnectionType(
            name="test_claim",
            kind=V1ConnectionKind.VOLUME_CLAIM,
            schema=V1ClaimConnection(
                mount_path="/claim/path", volume_claim="claim", read_only=True
            ),
        )
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": True,
                    "collectResources": True,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": [store.name],
                    "init": [{"connection": store.name}],
                },
            }
        )
        spec = resolve_contexts(
            namespace="test",
            owner_name="user",
            project_name="project",
            project_uuid="uuid",
            run_uuid="uuid",
            run_name="run",
            run_path="test",
            compiled_operation=compiled_operation,
            artifacts_store=store,
            connection_by_names={store.name: store},
            iteration=12,
        )
        assert spec == {
            "globals": {
                "owner_name": "user",
                "project_name": "project",
                "project_unique_name": "user.project",
                "project_uuid": "uuid",
                "name": "run",
                "uuid": "uuid",
                "run_info": "user.project.runs.uuid",
                "artifacts_path": "/plx-context/artifacts/test",
                "namespace": "test",
                "iteration": 12,
            },
            "init": {"test_claim": store.schema.to_dict()},
            "connections": {"test_claim": store.schema.to_dict()},
        }

    def test_resolver_default_service_ports(self):
        # Service runs additionally expose "ports" and a "base_url" global.
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": True,
                    "collectResources": True,
                },
                "run": {
                    "kind": V1RunKind.SERVICE,
                    "ports": [1212, 1234],
                    "container": {"image": "test", "command": "{{ ports[0] }}"},
                },
            }
        )
        spec = resolve_contexts(
            namespace="test",
            owner_name="user",
            project_name="project",
            project_uuid="uuid",
            run_uuid="uuid",
            run_name="run",
            run_path="test",
            compiled_operation=compiled_operation,
            artifacts_store=None,
            connection_by_names={},
            iteration=12,
        )
        assert spec == {
            "globals": {
                "owner_name": "user",
                "project_name": "project",
                "project_unique_name": "user.project",
                "project_uuid": "uuid",
                "run_info": "user.project.runs.uuid",
                "name": "run",
                "uuid": "uuid",
                "artifacts_path": "/plx-context/artifacts/test",
                "namespace": "test",
                "iteration": 12,
                "ports": [1212, 1234],
                "base_url": "/services/v1/test/user/project/runs/uuid",
            },
            "init": {},
            "connections": {},
        }
|
gregmbi/polyaxon | core/polyaxon/tracking/run.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import os
import sys
import time
import polyaxon_sdk
from polyaxon_sdk.rest import ApiException
from urllib3.exceptions import HTTPError
from polyaxon import settings
from polyaxon.client import RunClient
from polyaxon.client.decorators import can_log_events, check_no_op, check_offline
from polyaxon.constants import UNKNOWN
from polyaxon.containers.contexts import (
CONTEXT_MOUNT_ARTIFACTS_FORMAT,
CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT,
)
from polyaxon.env_vars.getters import (
get_collect_artifact,
get_collect_resources,
get_log_level,
)
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.polyaxonfile import OperationSpecification
from polyaxon.polyboard.artifacts import V1ArtifactKind
from polyaxon.polyboard.events import LoggedEventSpec, V1Event, get_asset_path
from polyaxon.tracking.events import EventFileWriter, events_processors
from polyaxon.tracking.events.writer import ResourceFileWriter
from polyaxon.utils.env import get_run_env
from polyaxon.utils.path_utils import get_path_extension
class Run(RunClient):
    """RunClient extension that writes tracking events for a Polyaxon run.

    Adds local event/resource file writers (used when running in-cluster)
    and convenience ``log_*`` methods for metrics, images, audio, models, etc.
    """

    @check_no_op
    def __init__(
        self,
        owner=None,
        project=None,
        run_uuid=None,
        client=None,
        track_code=True,
        track_env=False,
        refresh_data=True,
    ):
        super().__init__(
            owner=owner, project=project, run_uuid=run_uuid, client=client,
        )
        self.track_code = track_code
        self.track_env = track_env
        # Paths/writers populated lazily when running managed in-cluster.
        self._artifacts_path = None
        self._outputs_path = None
        self._event_logger = None
        self._resource_logger = None
        # Last logged metric value per name; flushed as outputs in _wait().
        self._results = {}
        if settings.CLIENT_CONFIG.is_managed and self.run_uuid:
            self.set_run_event_path()
        if (
            self.artifacts_path
            and settings.CLIENT_CONFIG.is_managed
            and get_collect_artifact()
        ):
            self.set_run_event_logger()
            if get_collect_resources():
                self.set_run_resource_logger()
        self._run = polyaxon_sdk.V1Run()
        if settings.CLIENT_CONFIG.is_offline:
            # Offline mode: no API refresh, no atexit hooks.
            return
        if self._run_uuid and refresh_data:
            self.refresh_data()
        # Track run env
        if settings.CLIENT_CONFIG.is_managed and self.track_env:
            self.log_run_env()
        self._register_wait()

    @property
    def artifacts_path(self):
        # Root path where event assets are written (None when not managed).
        return self._artifacts_path

    @property
    def outputs_path(self):
        # Path for run outputs (None when not managed).
        return self._outputs_path

    @check_no_op
    def set_run_event_path(self):
        """Derive the artifacts/outputs paths from the run uuid."""
        self._artifacts_path = CONTEXT_MOUNT_ARTIFACTS_FORMAT.format(self.run_uuid)
        self._outputs_path = CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format(self.run_uuid)

    @check_no_op
    def set_run_event_logger(self):
        """Create the event file writer rooted at ``artifacts_path``."""
        self._event_logger = EventFileWriter(run_path=self.artifacts_path)

    @check_no_op
    def set_run_resource_logger(self):
        """Create the resource-usage file writer rooted at ``artifacts_path``."""
        self._resource_logger = ResourceFileWriter(run_path=self.artifacts_path)
    @check_no_op
    def create(self, name=None, tags=None, description=None, content=None):
        """Create a new run (via the API when a client exists, else locally).

        Raises:
            PolyaxonClientException: on invalid ``content`` or API errors.
        """
        operation = polyaxon_sdk.V1OperationBody()
        if name:
            operation.name = name
        if tags:
            operation.tags = tags
        if description:
            operation.description = description
        if content:
            try:
                specification = OperationSpecification.read(content)
            except Exception as e:
                raise PolyaxonClientException("Client error: %s" % e) from e
            operation.content = specification.to_dict(dump=True)
        else:
            # Without a spec the run cannot be scheduled by Polyaxon.
            operation.is_managed = False
        if self.client:
            try:
                run = self.client.runs_v1.create_run(
                    owner=self.owner, project=self.project, body=operation
                )
            except (ApiException, HTTPError) as e:
                raise PolyaxonClientException("Client error: %s" % e) from e
            if not run:
                raise PolyaxonClientException("Could not create a run.")
        else:
            # No API client: build an in-memory run object only.
            run = polyaxon_sdk.V1Run(
                name=operation.name,
                tags=operation.tags,
                description=operation.description,
                content=operation.content,
                is_managed=operation.is_managed,
            )
        self._run = run
        self._run_uuid = run.uuid
        if self.artifacts_path:
            self.set_run_event_logger()
        if self.track_code:
            self.log_code_ref()
        if self.track_env:
            self.log_run_env()
        if not settings.CLIENT_CONFIG.is_managed:
            self._start()
        else:
            self._register_wait()
        return self

    @property
    def is_service(self):
        # True only for managed service runs; None in no-op mode.
        if settings.CLIENT_CONFIG.no_op:
            return None
        return settings.CLIENT_CONFIG.is_managed and settings.CLIENT_CONFIG.is_service
    @check_no_op
    @check_offline
    @can_log_events
    def log_metric(self, name, value, step=None, timestamp=None):
        """Log one metric value; also cached in ``_results`` for final outputs."""
        events = []
        event_value = events_processors.metric(value)
        if event_value == UNKNOWN:
            # Unconvertible value: silently skip.
            return
        events.append(
            LoggedEventSpec(
                name=name,
                kind=V1ArtifactKind.METRIC,
                event=V1Event.make(timestamp=timestamp, step=step, metric=event_value),
            )
        )
        if events:
            self._event_logger.add_events(events)
            self._results[name] = event_value

    @check_no_op
    @check_offline
    @can_log_events
    def log_metrics(self, step=None, timestamp=None, **metrics):
        """Log several metrics at once; unconvertible values are skipped."""
        events = []
        for metric in metrics:
            event_value = events_processors.metric(metrics[metric])
            if event_value == UNKNOWN:
                continue
            events.append(
                LoggedEventSpec(
                    name=metric,
                    kind=V1ArtifactKind.METRIC,
                    event=V1Event.make(
                        timestamp=timestamp, step=step, metric=event_value
                    ),
                )
            )
        if events:
            self._event_logger.add_events(events)

    @check_no_op
    @check_offline
    @can_log_events
    def log_image(
        self, data, name=None, step=None, timestamp=None, rescale=1, dataformats="CHW"
    ):
        """Log an image event; ``data`` may be a file path or in-memory image data."""
        is_file = isinstance(data, str) and os.path.exists(data)
        ext = "png"
        if is_file:
            name = name or os.path.basename(data)
            ext = get_path_extension(filepath=data) or ext
        else:
            name = name or "image"
        asset_path = get_asset_path(
            run_path=self.artifacts_path,
            kind=V1ArtifactKind.IMAGE,
            name=name,
            step=step,
            ext=ext,
        )
        if is_file:
            event_value = events_processors.image_path(
                from_path=data, asset_path=asset_path
            )
        elif hasattr(data, "encoded_image_string"):
            # Already-encoded image (e.g. TF summary proto-like object).
            event_value = events_processors.encoded_image(
                asset_path=asset_path, data=data
            )
        else:
            event_value = events_processors.image(
                asset_path=asset_path,
                data=data,
                rescale=rescale,
                dataformats=dataformats,
            )
        if event_value == UNKNOWN:
            return
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.IMAGE,
            event=V1Event(timestamp=timestamp, step=step, image=event_value),
        )
        self._event_logger.add_event(logged_event)
    @check_no_op
    @check_offline
    @can_log_events
    def log_image_with_boxes(
        self,
        tensor_image,
        tensor_boxes,
        name=None,
        step=None,
        timestamp=None,
        rescale=1,
        dataformats="CHW",
    ):
        """Log an image with bounding boxes drawn from ``tensor_boxes``."""
        name = name or "figure"
        asset_path = get_asset_path(
            run_path=self.artifacts_path,
            kind=V1ArtifactKind.IMAGE,
            name=name,
            step=step,
        )
        event_value = events_processors.image_boxes(
            asset_path=asset_path,
            tensor_image=tensor_image,
            tensor_boxes=tensor_boxes,
            rescale=rescale,
            dataformats=dataformats,
        )
        if event_value == UNKNOWN:
            return
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.IMAGE,
            event=V1Event(timestamp=timestamp, step=step, image=event_value),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_mpl_image(self, data, name=None, close=True, step=None, timestamp=None):
        """Log matplotlib figure(s) as image events (NCHW batch for a list)."""
        name = name or "figure"
        if isinstance(data, list):
            event_value = events_processors.figures_to_images(figures=data, close=close)
            if event_value == UNKNOWN:
                return
            self.log_image(
                name=name,
                data=event_value,
                step=step,
                timestamp=timestamp,
                dataformats="NCHW",
            )
        else:
            event_value = events_processors.figure_to_image(figure=data, close=close)
            self.log_image(
                name=name,
                data=event_value,
                step=step,
                timestamp=timestamp,
                dataformats="CHW",
            )

    @check_no_op
    @check_offline
    @can_log_events
    def log_video(
        self, data, name=None, fps=4, step=None, timestamp=None, content_type=None
    ):
        """Log a video event; ``data`` may be a file path or tensor-like frames."""
        is_file = isinstance(data, str) and os.path.exists(data)
        content_type = content_type or "gif"
        if is_file:
            name = name or os.path.basename(data)
            content_type = get_path_extension(filepath=data) or content_type
        else:
            name = name or "video"
        asset_path = get_asset_path(
            run_path=self.artifacts_path,
            kind=V1ArtifactKind.VIDEO,
            name=name,
            step=step,
            ext=content_type,
        )
        if is_file:
            event_value = events_processors.video_path(
                from_path=data, asset_path=asset_path, content_type=content_type
            )
        else:
            event_value = events_processors.video(
                asset_path=asset_path, tensor=data, fps=fps, content_type=content_type
            )
        if event_value == UNKNOWN:
            return
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.VIDEO,
            event=V1Event(timestamp=timestamp, step=step, video=event_value),
        )
        self._event_logger.add_event(logged_event)
    @check_no_op
    @check_offline
    @can_log_events
    def log_audio(
        self,
        data,
        name=None,
        sample_rate=44100,
        step=None,
        timestamp=None,
        content_type=None,
    ):
        """Log an audio event; ``data`` may be a file path or tensor-like samples."""
        is_file = isinstance(data, str) and os.path.exists(data)
        ext = content_type or "wav"
        if is_file:
            name = name or os.path.basename(data)
            ext = get_path_extension(filepath=data) or ext
        else:
            name = name or "audio"
        asset_path = get_asset_path(
            run_path=self.artifacts_path,
            kind=V1ArtifactKind.AUDIO,
            name=name,
            step=step,
            ext=ext,
        )
        if is_file:
            event_value = events_processors.audio_path(
                from_path=data, asset_path=asset_path, content_type=content_type
            )
        else:
            event_value = events_processors.audio(
                asset_path=asset_path, tensor=data, sample_rate=sample_rate
            )
        if event_value == UNKNOWN:
            return
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.AUDIO,
            event=V1Event(timestamp=timestamp, step=step, audio=event_value),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_text(self, name, text, step=None, timestamp=None):
        """Log a text event."""
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.TEXT,
            event=V1Event(timestamp=timestamp, step=step, text=text),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_html(self, name, html, step=None, timestamp=None):
        """Log an HTML snippet event."""
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.HTML,
            event=V1Event(timestamp=timestamp, step=step, html=html),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_np_histogram(self, name, values, counts, step=None, timestamp=None):
        """Log a histogram from precomputed numpy-style values/counts."""
        event_value = events_processors.np_histogram(values=values, counts=counts)
        if event_value == UNKNOWN:
            return
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.HISTOGRAM,
            event=V1Event(timestamp=timestamp, step=step, histogram=event_value),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_histogram(
        self, name, values, bins, max_bins=None, step=None, timestamp=None
    ):
        """Log a histogram computed from raw ``values`` with the given binning."""
        event_value = events_processors.histogram(
            values=values, bins=bins, max_bins=max_bins
        )
        if event_value == UNKNOWN:
            return
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.HISTOGRAM,
            event=V1Event(timestamp=timestamp, step=step, histogram=event_value),
        )
        self._event_logger.add_event(logged_event)
    @check_no_op
    @check_offline
    @can_log_events
    def log_model(
        self, path, name=None, framework=None, spec=None, step=None, timestamp=None
    ):
        """Log a model artifact located at ``path`` (file or directory)."""
        name = name or os.path.basename(path)
        ext = None
        if os.path.isfile(path):
            # Directories get no extension.
            ext = get_path_extension(filepath=path)
        asset_path = get_asset_path(
            run_path=self.artifacts_path,
            kind=V1ArtifactKind.MODEL,
            name=name,
            step=step,
            ext=ext,
        )
        model = events_processors.model_path(
            from_path=path, asset_path=asset_path, framework=framework, spec=spec
        )
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.MODEL,
            event=V1Event(timestamp=timestamp, step=step, model=model),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_dataframe(
        self, path, name=None, content_type=None, step=None, timestamp=None
    ):
        """Log a dataframe artifact located at ``path``."""
        name = name or os.path.basename(path)
        ext = get_path_extension(filepath=path)
        asset_path = get_asset_path(
            run_path=self.artifacts_path,
            kind=V1ArtifactKind.DATAFRAME,
            name=name,
            step=step,
            ext=ext,
        )
        df = events_processors.dataframe_path(
            from_path=path, asset_path=asset_path, content_type=content_type
        )
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.DATAFRAME,
            event=V1Event(timestamp=timestamp, step=step, dataframe=df),
        )
        self._event_logger.add_event(logged_event)
@check_no_op
@check_offline
@can_log_events
def log_artifact(
self, path, name=None, artifact_kind=None, step=None, timestamp=None
):
name = name or os.path.basename(name)
ext = get_path_extension(filepath=path)
artifact_kind = artifact_kind or V1ArtifactKind.FILE
asset_path = get_asset_path(
run_path=self.artifacts_path,
kind=artifact_kind,
name=name,
step=step,
ext=ext,
)
artifact = events_processors.artifact_path(
from_path=path, asset_path=asset_path, kind=artifact_kind
)
logged_event = LoggedEventSpec(
name=name,
kind=artifact_kind,
event=V1Event(timestamp=timestamp, step=step, artifact=artifact),
)
self._event_logger.add_event(logged_event)
    @check_no_op
    @check_offline
    @can_log_events
    def log_plotly_chart(self, name, figure, step=None, timestamp=None):
        """Log a plotly figure as a chart event."""
        chart = events_processors.plotly_chart(figure=figure)
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.CHART,
            event=V1Event(timestamp=timestamp, step=step, chart=chart),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_bokeh_chart(self, name, figure, step=None, timestamp=None):
        """Log a bokeh figure as a chart event."""
        chart = events_processors.bokeh_chart(figure=figure)
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.CHART,
            event=V1Event(timestamp=timestamp, step=step, chart=chart),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    @check_offline
    @can_log_events
    def log_mpl_plotly_chart(self, name, figure, step=None, timestamp=None):
        """Log a matplotlib figure converted to plotly as a chart event."""
        chart = events_processors.mpl_plotly_chart(figure=figure)
        logged_event = LoggedEventSpec(
            name=name,
            kind=V1ArtifactKind.CHART,
            event=V1Event(timestamp=timestamp, step=step, chart=chart),
        )
        self._event_logger.add_event(logged_event)

    @check_no_op
    def get_log_level(self):
        # Delegates to the module-level env getter (not recursion).
        return get_log_level()

    @check_no_op
    @check_offline
    def _register_wait(self):
        # Flush writers when the interpreter exits.
        atexit.register(self._wait)

    @check_no_op
    @check_offline
    def _start(self):
        """Mark the run as started and hook exit/exception reporting."""
        atexit.register(self._end)
        self.start()

        def excepthook(exception, value, tb):
            # Report the failure to Polyaxon before the default handling.
            self.log_failed(message="Type: {}, Value: {}".format(exception, value))
            # Resume normal work
            sys.__excepthook__(exception, value, tb)

        sys.excepthook = excepthook

    @check_no_op
    @check_offline
    def _end(self):
        # Normal-exit path installed by _start().
        self.log_succeeded()
        self._wait()

    @check_no_op
    @check_offline
    def _wait(self):
        """Close writers, flush cached metric results as outputs, then pause briefly."""
        if self._event_logger:
            self._event_logger.close()
        if self._resource_logger:
            self._resource_logger.close()
        if self._results:
            self.log_outputs(**self._results)
        # Give async writers a moment to finish before the process exits.
        time.sleep(1)

    @check_no_op
    @check_offline
    @can_log_events
    def log_run_env(self):
        # TODO: log to file
        return get_run_env()
|
gregmbi/polyaxon | examples/in_cluster/pytorch/mnist/train.py | <filename>examples/in_cluster/pytorch/mnist/train.py
from __future__ import print_function
import logging
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.autograd import Variable
logging.basicConfig(level=logging.INFO)
def get_train_loader(data_dir, batch_size, cuda):
    """Build a shuffled DataLoader over the (downloaded) MNIST training split."""
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
    dataset = datasets.MNIST(
        data_dir,
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]),
    )
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, **loader_kwargs)
def get_test_loader(data_dir, batch_size, cuda):
    """Build a shuffled DataLoader over the MNIST test split (no download)."""
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
    dataset = datasets.MNIST(
        data_dir,
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ]),
    )
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, **loader_kwargs)
def train(model, train_loader, epoch, cuda, optimizer, log_interval):
    """Run one training epoch, logging the loss every ``log_interval`` batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            logging.info(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch,
                    batch_idx * len(data),
                    len(train_loader.dataset),
                    100. * batch_idx / len(train_loader),
                    loss.item())
            )
def test(experiment, model, test_loader, cuda):
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss
pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len(test_loader.dataset)
accuracy = correct / len(test_loader.dataset)
logging.info(
'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss,
correct,
len(test_loader.dataset),
100. * accuracy)
)
# Polyaxon
experiment.log_metrics(loss=test_loss, accuracy=accuracy.item())
|
gregmbi/polyaxon | core/polyaxon/polypod/contexts.py | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from polyaxon.api import REWRITE_SERVICES_V1, SERVICES_V1
from polyaxon.containers import contexts
from polyaxon.polyflow import V1CompiledOperation
from polyaxon.polypod.specs.contexts import PluginsContextsSpec
from polyaxon.schemas.types import V1ConnectionType
from polyaxon.utils.path_utils import get_path
def resolve_contexts(
    namespace: str,
    owner_name: str,
    project_name: str,
    project_uuid: str,
    run_uuid: str,
    run_name: str,
    run_path: str,
    compiled_operation: V1CompiledOperation,
    artifacts_store: V1ConnectionType,
    connection_by_names: Dict[str, V1ConnectionType],
    iteration: int,
) -> Dict:
    """Assemble the context payload (globals/init/connections) for a run."""
    globals_section = {
        "owner_name": owner_name,
        "project_name": project_name,
        "project_unique_name": "{}.{}".format(owner_name, project_name),
        "project_uuid": project_uuid,
        "run_info": "{}.{}.runs.{}".format(owner_name, project_name, run_uuid),
        "name": run_name,
        "uuid": run_uuid,
        "namespace": namespace,
        "iteration": iteration,
    }
    init_section = {}
    connections_section = {}

    plugins = PluginsContextsSpec.from_config(compiled_operation.plugins)
    # The artifacts path points at the mounted context when artifacts are
    # collected, otherwise at the artifacts store path (when one is set).
    if plugins.collect_artifacts:
        globals_section["artifacts_path"] = get_path(
            contexts.CONTEXT_MOUNT_ARTIFACTS, run_path
        )
    elif artifacts_store:
        globals_section["artifacts_path"] = get_path(
            artifacts_store.store_path, run_path
        )

    if compiled_operation and not compiled_operation.has_pipeline:
        # Expose the schema of every connection referenced by init sections.
        for init in compiled_operation.run.init or []:
            if not init.connection:
                continue
            schema = connection_by_names[init.connection].schema
            if schema:
                init_section[init.connection] = schema.to_dict()
        # And of every connection requested by the run itself.
        for connection_name in compiled_operation.run.connections or []:
            schema = connection_by_names[connection_name].schema
            if schema:
                connections_section[connection_name] = schema.to_dict()
        if compiled_operation.is_service_run:
            globals_section["ports"] = compiled_operation.run.ports
            service = (
                REWRITE_SERVICES_V1
                if compiled_operation.run.rewrite_path
                else SERVICES_V1
            )
            globals_section["base_url"] = "/{}/{}/{}/{}/runs/{}".format(
                service, namespace, owner_name, project_name, run_uuid
            )

    return {
        "globals": globals_section,
        "init": init_section,
        "connections": connections_section,
    }
def resolve_globals_contexts(
    namespace: str,
    owner_name: str,
    project_name: str,
    project_uuid: str,
    run_uuid: str,
    run_name: str,
    run_path: str,
    iteration: int,
) -> Dict:
    """Assemble only the `globals` context section for a run."""
    project_unique_name = "{}.{}".format(owner_name, project_name)
    run_info = "{}.{}.runs.{}".format(owner_name, project_name, run_uuid)
    return {
        "globals": {
            "owner_name": owner_name,
            "project_name": project_name,
            "project_unique_name": project_unique_name,
            "project_uuid": project_uuid,
            "run_info": run_info,
            "name": run_name,
            "uuid": run_uuid,
            "namespace": namespace,
            "iteration": iteration,
            # Globals always point at the in-cluster mounted artifacts path.
            "artifacts_path": get_path(contexts.CONTEXT_MOUNT_ARTIFACTS, run_path),
        }
    }
|
gregmbi/polyaxon | core/polyaxon/deploy/schemas/deployment_types.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DeploymentTypes:
    """Canonical identifiers for the deployment targets supported by Polyaxon."""

    KUBERNETES = "kubernetes"
    MINIKUBE = "minikube"
    MICRO_K8S = "microk8s"
    DOCKER_COMPOSE = "docker-compose"
    DOCKER = "docker"
    HEROKU = "heroku"
    # All accepted values, e.g. for validating a user-provided deployment type.
    VALUES = [KUBERNETES, MINIKUBE, MICRO_K8S, DOCKER_COMPOSE, DOCKER, HEROKU]
|
gregmbi/polyaxon | core/polyaxon/builds/builder.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from docker import APIClient
from docker.errors import APIError, BuildError
from urllib3.exceptions import ReadTimeoutError
from polyaxon.exceptions import PolyaxonBuildException
from polyaxon.schemas.types import V1UriType
from polyaxon.utils.log_levels import LogLevels
_logger = logging.getLogger("polyaxon.dockerizer")
class DockerMixin(object):
    """Shared docker log-stream parsing/printing for build and push flows.

    Subclasses set `IS_BUILD` (True for builds, False for pushes) and must
    define an `is_pushing` instance attribute, used to print a single
    "Pushing ..." marker when push progress entries start arriving.
    """

    IS_BUILD = None

    def _prepare_log_lines(self, log_line):  # pylint:disable=too-many-branches
        """Parse one raw docker stream payload into printable lines.

        Returns a tuple `(log_lines, status)`; `status` is False when the
        payload contains an `error` entry.
        """
        raw = log_line.decode("utf-8").strip()
        raw_lines = raw.split("\n")
        log_lines = []
        status = True
        for raw_line in raw_lines:
            try:
                json_line = json.loads(raw_line)
                if json_line.get("error"):
                    log_lines.append(
                        "{}: {}".format(
                            LogLevels.ERROR, str(json_line.get("error", json_line))
                        )
                    )
                    status = False
                else:
                    if json_line.get("stream"):
                        log_lines.append(
                            "Building: {}".format(json_line["stream"].strip())
                        )
                    elif json_line.get("status"):
                        # Push progress entry; only announce the push once.
                        if not self.is_pushing and not self.IS_BUILD:
                            self.is_pushing = True
                            log_lines.append("Pushing ...")
                    elif json_line.get("aux"):
                        log_lines.append(
                            "Pushing finished: {}".format(json_line.get("aux"))
                        )
                    else:
                        log_lines.append(str(json_line))
            except json.JSONDecodeError:
                # Keep non-JSON payloads visible instead of dropping them.
                log_lines.append("JSON decode error: {}".format(raw_line))
        return log_lines, status

    def _handle_logs(self, log_lines):
        # Print the prepared lines to stdout.
        for log_line in log_lines:
            print(log_line)  # pylint:disable=superfluous-parens

    def _handle_log_stream(self, stream):
        """Consume a docker log stream, printing lines as they arrive.

        Returns False when any payload reported an error or the docker client
        raised a build/API error; True otherwise.
        """
        log_lines = []
        status = True
        try:
            for log_line in stream:
                new_log_lines, new_status = self._prepare_log_lines(log_line)
                log_lines += new_log_lines
                if not new_status:
                    status = new_status
                self._handle_logs(log_lines)
                log_lines = []
            if log_lines:
                self._handle_logs(log_lines)
        except (BuildError, APIError) as e:
            self._handle_logs(
                [
                    "{}: Could not build the image, encountered {}".format(
                        LogLevels.ERROR, e
                    )
                ]
            )
            return False
        return status
class DockerBuilder(DockerMixin):
    """Builds a docker image from a context directory, streaming build logs.

    Private-registry credentials can be provided as a list of `V1UriType`
    (user/password/host) and are used to login before building.
    """

    IS_BUILD = True

    def __init__(
        self, context, destination, credstore_env=None, registries=None, docker=None
    ):
        self.destination = destination
        self.context = context
        self._validate_registries(registries)
        self.registries = registries
        self.docker = docker or APIClient(version="auto", credstore_env=credstore_env)
        self.is_pushing = False

    @staticmethod
    def _validate_registries(registries):
        """Raise `PolyaxonBuildException` unless every registry is a `V1UriType`."""
        if not registries or isinstance(registries, V1UriType):
            return True

        for registry in registries:
            if not isinstance(registry, V1UriType):
                raise PolyaxonBuildException(
                    "A registry `{}` is not valid Urispec.".format(registry)
                )
        return True

    def check_image(self):
        """Return the already-built image(s) matching the destination tag, if any."""
        return self.docker.images(self.destination)

    def login_private_registries(self):
        """Login to every configured private registry before building."""
        if not self.registries:
            return

        for registry in self.registries:
            self.docker.login(
                username=registry.user,
                # Fix: the credential comes from the registry spec itself
                # (the previous body contained a scrubbed placeholder here).
                password=registry.password,
                registry=registry.host,
                reauth=True,
            )

    def build(self, nocache=False, memory_limit=None):
        """Run the docker build; return True when the log stream reports success."""
        limits = {
            # Disable memory swap for building
            "memswap": -1
        }
        if memory_limit:
            limits["memory"] = memory_limit

        stream = self.docker.build(
            path=self.context,
            tag=self.destination,
            forcerm=True,
            rm=True,
            pull=True,
            nocache=nocache,
            container_limits=limits,
        )
        return self._handle_log_stream(stream=stream)
class DockerPusher(DockerMixin):
    """Pushes an already-built image to its registry, streaming push logs."""

    IS_BUILD = False

    def __init__(self, destination, credstore_env=None, docker=None):
        self.destination = destination
        self.docker = docker or APIClient(version="auto", credstore_env=credstore_env)
        self.is_pushing = False

    def push(self):
        """Push the destination tag; True when the stream reports success."""
        push_stream = self.docker.push(self.destination, stream=True)
        return self._handle_log_stream(stream=push_stream)
def _build(
    context, destination, nocache, docker=None, credstore_env=None, registries=None
):
    """Build necessary code for a job to run"""
    _logger.info("Starting build ...")
    # Build the image
    builder = DockerBuilder(
        context=context,
        destination=destination,
        credstore_env=credstore_env,
        registries=registries,
        docker=docker,
    )
    builder.login_private_registries()
    # Reuse an already-built image unless a fresh (nocache) build is requested.
    if not (builder.check_image() and not nocache):
        if not builder.build(nocache=nocache):
            raise PolyaxonBuildException("The docker image could not be built.")
    return builder
def build(
    context,
    destination,
    nocache,
    docker=None,
    credstore_env=None,
    registries=None,
    max_retries=3,
    sleep_interval=1,
):
    """Build necessary code for a job to run"""
    # Retry only on client read timeouts; any other failure propagates.
    for _ in range(max_retries):
        try:
            return _build(
                context=context,
                destination=destination,
                docker=docker,
                nocache=nocache,
                credstore_env=credstore_env,
                registries=registries,
            )
        except ReadTimeoutError:
            time.sleep(sleep_interval)
    raise PolyaxonBuildException(
        "The docker image could not be built, client timed out."
    )
def push(destination, docker=None, max_retries=3, sleep_interval=1):
    """Push a built image, retrying on client read timeouts."""
    pusher = DockerPusher(destination=destination, docker=docker)
    for _ in range(max_retries):
        try:
            # A False result means the push stream reported an error.
            if not pusher.push():
                raise PolyaxonBuildException("The docker image could not be pushed.")
            return
        except ReadTimeoutError:
            time.sleep(sleep_interval)
    raise PolyaxonBuildException(
        "The docker image could not be pushed, client timed out."
    )
def build_and_push(
    context,
    destination,
    nocache,
    credstore_env=None,
    registries=None,
    max_retries=3,
    sleep_interval=1,
):
    """Build necessary code for a job to run and push it."""
    # Build (or reuse) the image first, then push with the same docker client.
    builder = build(
        context=context,
        destination=destination,
        nocache=nocache,
        credstore_env=credstore_env,
        registries=registries,
        max_retries=max_retries,
        sleep_interval=sleep_interval,
    )
    push(
        destination=destination,
        docker=builder.docker,
        max_retries=max_retries,
        sleep_interval=sleep_interval,
    )
|
gregmbi/polyaxon | core/tests/test_polypod/test_resolvers/test_core_resolver.py | <filename>core/tests/test_polypod/test_resolvers/test_core_resolver.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import pytest
from tests.utils import BaseTestCase
from polyaxon import settings
from polyaxon.connections.kinds import V1ConnectionKind
from polyaxon.connections.schemas import V1BucketConnection, V1K8sResourceSchema
from polyaxon.containers.containers import (
get_default_init_container,
get_default_sidecar_container,
)
from polyaxon.exceptions import PolyaxonCompilerError
from polyaxon.managers.agent import AgentManager
from polyaxon.polyaxonfile.specs import kinds
from polyaxon.polyflow import V1CompiledOperation, V1RunKind
from polyaxon.polypod.compiler.resolvers import CoreResolver
from polyaxon.schemas.cli.agent_config import AgentConfig
from polyaxon.schemas.types import V1ConnectionType, V1K8sResourceType
@pytest.mark.polypod_mark
class TestResolver(BaseTestCase):
    """Tests for `CoreResolver` instantiation and connection resolution."""

    def setUp(self):
        super().setUp()
        # Minimal compiled operation: a single job with all plugins disabled.
        self.compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {"kind": V1RunKind.JOB, "container": {"image": "test"},},
            }
        )

    def test_core_resolver_instance(self):
        # Without explicit uuids the resolver falls back to the names.
        resolver = CoreResolver(
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            params=None,
        )
        assert resolver.project_uuid == resolver.project_name
        assert resolver.run_uuid == resolver.run_name
        # Explicit uuids are kept as-is.
        resolver = CoreResolver(
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            project_uuid="some_uuid",
            run_uuid="some_uuid",
            params=None,
        )
        assert resolver.project_uuid != resolver.project_name
        assert resolver.run_uuid != resolver.run_name

    def test_resolve_connections_with_no_config(self):
        # No agent config available -> resolving connections must fail.
        settings.AGENT_CONFIG = None
        resolver = CoreResolver(
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            params=None,
        )
        with self.assertRaises(PolyaxonCompilerError):
            resolver.resolve_connections()

    def test_resolve_without_compiled_operation(self):
        # A resolver cannot be constructed without a compiled operation.
        with self.assertRaises(PolyaxonCompilerError):
            CoreResolver(
                compiled_operation=None,
                owner_name="user",
                project_name="p1",
                run_name="j1",
                run_path="test",
                params=None,
            )

    def test_resolve_connections_with_invalid_config(self):
        fpath = tempfile.mkdtemp()
        AgentManager.CONFIG_PATH = fpath
        # Two secrets shared across three bucket connections.
        secret1 = V1K8sResourceType(
            name="secret1",
            schema=V1K8sResourceSchema(name="secret1"),
            is_requested=True,
        )
        secret2 = V1K8sResourceType(
            name="secret2",
            schema=V1K8sResourceSchema(name="secret2"),
            is_requested=True,
        )
        connection1 = V1ConnectionType(
            name="test_s3",
            kind=V1ConnectionKind.S3,
            schema=V1BucketConnection(bucket="s3//:foo"),
            secret=secret1.schema,
        )
        connection2 = V1ConnectionType(
            name="test_gcs",
            kind=V1ConnectionKind.GCS,
            schema=V1BucketConnection(bucket="gcs//:foo"),
            secret=secret1.schema,
        )
        connection3 = V1ConnectionType(
            name="test_wasb",
            kind=V1ConnectionKind.WASB,
            schema=V1BucketConnection(bucket="wasbs//:foo"),
            secret=secret2.schema,
        )
        settings.AGENT_CONFIG = AgentConfig(
            namespace="foo",
            artifacts_store=connection1,
            connections=[connection2, connection3],
        )
        # With no requested connections only the artifacts store is resolved.
        resolver = CoreResolver(
            compiled_operation=self.compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {connection1.name: connection1}
        assert resolver.artifacts_store == connection1
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
        # Add run spec to resolve connections
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": {connection3.name},
                },
            }
        )
        resolver = CoreResolver(
            compiled_operation=compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {
            connection1.name: connection1,
            connection3.name: connection3,
        }
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.artifacts_store == connection1
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
        # Add run spec to resolve connections
        compiled_operation = V1CompiledOperation.read(
            {
                "version": 1.05,
                "kind": kinds.COMPILED_OPERATION,
                "plugins": {
                    "auth": False,
                    "shm": False,
                    "collectLogs": False,
                    "collectArtifacts": False,
                    "collectResources": False,
                },
                "run": {
                    "kind": V1RunKind.JOB,
                    "container": {"image": "test"},
                    "connections": {
                        connection1.name,
                        connection2.name,
                        connection3.name,
                    },
                },
            }
        )
        resolver = CoreResolver(
            compiled_operation=compiled_operation,
            owner_name="user",
            project_name="p1",
            run_name="j1",
            run_path="test",
            params=None,
        )
        resolver.resolve_connections()
        assert resolver.namespace == "foo"
        assert resolver.connection_by_names == {
            connection3.name: connection3,
            connection2.name: connection2,
            connection1.name: connection1,
        }
        assert [s.schema for s in resolver.secrets] == [secret1.schema, secret2.schema]
        assert resolver.artifacts_store == connection1
        assert resolver.polyaxon_sidecar == get_default_sidecar_container()
        assert resolver.polyaxon_init == get_default_init_container()
|
gregmbi/polyaxon | core/polyaxon/polyflow/run/dag.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Mapping
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon import types
from polyaxon.exceptions import PolyaxonSchemaError
from polyaxon.k8s import k8s_schemas
from polyaxon.pkg import SCHEMA_VERSION
from polyaxon.polyflow import dags
from polyaxon.polyflow.early_stopping import EarlyStoppingSchema
from polyaxon.polyflow.environment import EnvironmentSchema
from polyaxon.polyflow.io import V1IO
from polyaxon.polyflow.io import params as ops_params
from polyaxon.polyflow.run.kinds import V1RunKind
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.swagger import SwaggerField
class DagSchema(BaseCamelSchema):
    """Marshmallow schema validating/deserializing the `dag` run section."""

    kind = fields.Str(allow_none=True, validate=validate.Equal(V1RunKind.DAG))
    operations = fields.List(fields.Nested("OperationSchema"))
    components = fields.List(fields.Nested("ComponentSchema"))
    environment = fields.Nested(EnvironmentSchema, allow_none=True)
    connections = fields.List(fields.Str(), allow_none=True)
    volumes = fields.List(SwaggerField(cls=k8s_schemas.V1Volume), allow_none=True)
    concurrency = fields.Int(allow_none=True)
    early_stopping = fields.List(fields.Nested(EarlyStoppingSchema), allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1Dag
class V1Dag(BaseConfig, polyaxon_sdk.V1Dag):
    """A DAG run: a set of operations wired together by params/dependencies.

    Besides the raw config, instances keep derived structures built by
    `process_dag`/`process_components`:
      * `_dag`: OpName -> DagOpSpec (upstream/downstream edges)
      * `_components_by_names`: ComponentName -> Component
      * `_op_component_mapping`: OpName -> ComponentName
      * `_context`: resolvable references (ops outputs/inputs, status, ...)
    """

    SCHEMA = DagSchema
    IDENTIFIER = V1RunKind.DAG
    REDUCED_ATTRIBUTES = [
        "operations",
        "components",
        "concurrency",
        "earlyStopping",
        "environment",
        "connections",
        "volumes",
    ]

    def __init__(
        self,
        operations=None,
        components=None,
        concurrency=None,
        early_stopping=None,
        kind=None,
        environment=None,
        connections=None,
        volumes=None,
    ):
        super().__init__(
            kind=kind,
            operations=operations,
            components=components,
            concurrency=concurrency,
            early_stopping=early_stopping,
            environment=environment,
            connections=connections,
            volumes=volumes,
        )
        self._dag = {}  # OpName -> DagOpSpec
        self._components_by_names = {}  # ComponentName -> Component
        self._op_component_mapping = {}  # OpName -> ComponentName
        self._context = {}  # Ops output names -> types

    @property
    def dag(self):
        # Read-only view on the processed dag structure.
        return self._dag

    def validate_dag(self, dag=None):
        """Check the dag for orphan ops and cycles; raise on invalid graphs."""
        dag = dag or self.dag
        orphan_ops = self.get_orphan_ops(dag=dag)
        if orphan_ops:
            raise PolyaxonSchemaError(
                "Pipeline has a non valid dag, the dag contains an orphan ops: `{}`, "
                "check if you are referencing this op "
                "in a parameter or a condition".format(orphan_ops)
            )
        # A topological sort fails on cyclic graphs.
        self.sort_topologically(dag=dag)

    def _get_op_upstream(self, op):
        """Collect upstream op names from explicit dependencies and param refs."""
        upstream = set(op.dependencies) if op.dependencies else set([])

        if not op.params:
            return upstream

        if not isinstance(op.params, Mapping):
            raise PolyaxonSchemaError(
                "Op `{}` defines a malformed params `{}`, "
                "params should be a dictionary of form <name: value>".format(
                    op.name, op.params
                )
            )

        for param in op.params:
            param_spec = op.params[param].get_spec(
                name=param, iotype=None, is_flag=None
            )
            if param_spec.param.is_ops_ref:
                upstream.add(param_spec.param.entity_ref)

        return upstream

    def _process_op(self, op):
        # Register the op and wire it as downstream of every upstream op.
        upstream = self._get_op_upstream(op=op)
        self._dag = dags.set_dag_op(
            dag=self.dag, op_id=op.name, op=op, upstream=upstream, downstream=None
        )
        for op_name in upstream:
            self._dag = dags.set_dag_op(
                dag=self.dag, op_id=op_name, downstream=[op.name]
            )

    def process_dag(self):
        """Build the dag structure from the configured operations."""
        for op in self.operations or []:
            self._process_op(op)

    def add_op(self, op):
        """Append an operation to the config and wire it into the dag."""
        self.operations = self.operations or []
        self.operations.append(op)
        self._process_op(op)

    def add_ops(self, ops):
        """Append several operations (see `add_op`)."""
        for op in ops:
            self.add_op(op)

    def get_independent_ops(self, dag=None):
        """Get a list of all nodes in the graph with no dependencies."""
        # Fix: honor an explicitly passed dag. The previous `self.dag or dag`
        # ignored the argument whenever self.dag was non-empty, inconsistent
        # with `get_orphan_ops` and `sort_topologically`.
        return dags.get_independent_ops(dag or self.dag)

    def get_orphan_ops(self, dag=None):
        """Get orphan ops for given dag."""
        return dags.get_orphan_ops(dag or self.dag)

    def sort_topologically(self, dag=None, flatten=False):
        """Sort the dag breath first topologically.

        Only the nodes inside the dag are returned, i.e. the nodes that are also keys.

        Returns:
            a topological ordering of the DAG.
        Raises:
            an error if this is not possible (graph is not valid).
        """
        return dags.sort_topologically(dag or self.dag, flatten=flatten)

    def process_components(self, inputs=None):
        """Resolve component references and build/validate the params context.

        Registers dag-level inputs, indexes components by name, maps every op
        to its component, exposes each op's inputs/outputs (plus run metadata
        like status/name/uuid) in the context, and finally validates every
        op's params against that context.
        """
        inputs = inputs or []
        for _input in inputs:
            self._context["dag.inputs.{}".format(_input.name)] = _input

        if not self.operations:
            raise PolyaxonSchemaError(
                "Pipeline is not valid, it has no ops to validate components."
            )

        components = self.components or []

        for component in components:
            component_name = component.name
            if component_name in self._components_by_names:
                raise PolyaxonSchemaError(
                    "Pipeline has multiple components with the same name `{}`".format(
                        component_name
                    )
                )
            self._components_by_names[component_name] = component

        for op in self.operations:
            op_name = op.name
            if op.has_component_reference:
                outputs = op.template.outputs
                inputs = op.template.inputs
            elif op.has_dag_reference:
                component_ref_name = op.template.name
                if op_name in self._op_component_mapping:
                    raise PolyaxonSchemaError(
                        "Pipeline has multiple ops with the same name `{}`".format(
                            op_name
                        )
                    )
                if component_ref_name not in self._components_by_names:
                    raise PolyaxonSchemaError(
                        "Pipeline op with name `{}` requires a component with name `{}`, "
                        "which is not defined on this pipeline.".format(
                            op_name, component_ref_name
                        )
                    )
                self._op_component_mapping[op_name] = component_ref_name
                outputs = self._components_by_names[component_ref_name].outputs
                inputs = self._components_by_names[component_ref_name].inputs
            elif op.has_hub_reference:
                # Hub/url/path references are resolved later, outside this dag.
                continue
            elif op.has_url_reference:
                continue
            elif op.has_path_reference:
                continue
            else:
                raise PolyaxonSchemaError(
                    "Pipeline op has no template field `{}`".format(op_name)
                )

            if outputs:
                for output in outputs:
                    self._context[
                        "ops.{}.outputs.{}".format(op_name, output.name)
                    ] = output
            if inputs:
                for cinput in inputs:
                    self._context[
                        "ops.{}.inputs.{}".format(op_name, cinput.name)
                    ] = cinput

            # We allow to resolve name, status, project, all outputs/inputs, iteration
            self._context["ops.{}.inputs".format(op_name)] = V1IO(
                name="inputs", iotype=types.DICT, value={}, is_optional=True
            )
            self._context["ops.{}.outputs".format(op_name)] = V1IO(
                name="outputs", iotype=types.DICT, value={}, is_optional=True
            )
            self._context["ops.{}.status".format(op_name)] = V1IO(
                name="status", iotype=types.STR, value="", is_optional=True
            )
            self._context["ops.{}.name".format(op_name)] = V1IO(
                name="name", iotype=types.STR, value="", is_optional=True
            )
            self._context["ops.{}.uuid".format(op_name)] = V1IO(
                name="uuid", iotype=types.STR, value="", is_optional=True
            )
            self._context["ops.{}.project_name".format(op_name)] = V1IO(
                name="project_name", iotype=types.STR, value="", is_optional=True
            )
            self._context["ops.{}.project_uuid".format(op_name)] = V1IO(
                name="project_uuid", iotype=types.STR, value="", is_optional=True
            )
            self._context["ops.{}.iteration".format(op_name)] = V1IO(
                name="iteration", iotype=types.STR, value="", is_optional=True
            )

        # Second pass: validate every op's params against the full context.
        for op in self.operations:
            if op.has_component_reference:
                component_ref = op.template.name
                outputs = op.template.outputs
                inputs = op.template.inputs
            elif op.has_hub_reference:
                component_ref = op.template.name
                continue
            elif op.has_url_reference:
                component_ref = op.template.url
                continue
            elif op.has_path_reference:
                component_ref = op.template.path
                continue
            elif op.has_dag_reference:
                component_ref = op.template.name
                outputs = self._components_by_names[component_ref].outputs
                inputs = self._components_by_names[component_ref].inputs
            else:
                raise PolyaxonSchemaError(
                    "Pipeline op has no template field `{}`".format(op.name)
                )

            ops_params.validate_params(
                params=op.params,
                inputs=inputs,
                outputs=outputs,
                context=self._context,
                is_template=False,
                check_runs=False,
                extra_info="<op {}>.<component {}>".format(op.name, component_ref),
            )

    def set_op_component(self, op_name):
        """Attach the mapped component as the template of the named op."""
        if op_name not in self.dag:
            raise PolyaxonSchemaError(
                "Job with name `{}` was not found in Dag, "
                "make sure to run `process_dag`.".format(op_name)
            )
        op_spec = self.dag[op_name]
        if op_spec.op.has_component_reference:
            # The op already carries its component inline.
            return

        if op_name not in self._op_component_mapping:
            raise PolyaxonSchemaError(
                "Pipeline op with name `{}` requires a reference `{} ({})`, "
                "which is not defined on this pipeline, "
                "make sure to run `process_components`".format(
                    op_name,
                    op_spec.op.template.kind,
                    op_spec.op.template.get_kind_value(),
                )
            )
        component_ref_name = self._op_component_mapping[op_name]
        op_spec.op.set_template(self._components_by_names[component_ref_name])

    def get_op_spec_by_index(self, idx):
        """Return an `OperationSpecification` for the op at position `idx`."""
        from polyaxon.polyaxonfile import OperationSpecification

        op_dict = self.operations[idx].to_dict()
        op_dict[OperationSpecification.VERSION] = op_dict.get(
            OperationSpecification.VERSION, SCHEMA_VERSION
        )
        return OperationSpecification.read(op_dict)

    def get_op_spec_by_name(self, name):
        """Return an `OperationSpecification` for the op with the given name."""
        from polyaxon.polyaxonfile import OperationSpecification

        op_dict = self.dag[name].op.to_dict()
        op_dict[OperationSpecification.VERSION] = op_dict.get(
            OperationSpecification.VERSION, SCHEMA_VERSION
        )
        return OperationSpecification.read(op_dict)
|
gregmbi/polyaxon | core/polyaxon/polyaxonfile/specs/operation.py | <filename>core/polyaxon/polyaxonfile/specs/operation.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from polyaxon.polyaxonfile.specs import kinds
from polyaxon.polyaxonfile.specs.base import BaseSpecification
from polyaxon.polyflow import V1CompiledOperation, V1Operation
class OperationSpecification(BaseSpecification):
    """The polyaxonfile specification for operations."""

    _SPEC_KIND = kinds.OPERATION

    CONFIG = V1Operation

    @classmethod
    def compile_operation(
        cls, config: V1Operation, override: Dict = None, override_post: bool = True
    ) -> V1CompiledOperation:
        """Compile an operation (plus optional override) into a compiled operation."""
        run_data = cls.generate_run_data(config, override, override_post)
        return V1CompiledOperation.read(run_data)

    @classmethod
    def generate_run_data(cls, config: V1Operation, override=None, override_post=True):
        """Return the ordered list of dicts to merge into a compiled operation.

        Layers are: version, the op's component (minus its tag), the compiled
        kind marker, then the op-level overrides and the caller override —
        ordered according to `override_post`.
        """
        op_config = config.to_light_dict()
        # Drop the component tag if it is set.
        component_config = op_config.pop("component", {})
        component_config.pop("componentTag", None)
        values = [
            {"version": config.version},
            component_config,
            {"kind": kinds.COMPILED_OPERATION},
        ]
        overridable_fields = (
            cls.NAME,
            cls.DESCRIPTION,
            cls.TAGS,
            cls.PROFILE,
            cls.QUEUE,
            cls.CACHE,
            cls.PLUGINS,
            cls.TERMINATION,
            cls.PARALLEL,
            cls.SCHEDULE,
            cls.DEPENDENCIES,
            cls.TRIGGER,
            cls.CONDITIONS,
            cls.SKIP_ON_UPSTREAM_SKIP,
        )
        # Keep only fields the op actually sets (truthy values).
        op_override = {
            field: op_config[field]
            for field in overridable_fields
            if op_config.get(field)
        }
        # Patch run
        run_patch = op_config.get(cls.RUN_PATCH)
        if run_patch:
            op_override[cls.RUN] = run_patch
        # `override_post` decides whether the caller override is applied after
        # (wins over) the op-level override or before it.
        layers = [op_override, override] if override_post else [override, op_override]
        values.extend(layer for layer in layers if layer)
        return values
|
gregmbi/polyaxon | core/polyaxon/managers/base.py | <filename>core/polyaxon/managers/base.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import Mapping
import ujson
from polyaxon.containers.contexts import polyaxon_user_path
from polyaxon.logger import logger
from polyaxon.schemas.base import BaseConfig
class BaseConfigManager(object):
    """Base class for managing a configuration file."""

    # Whether the config lives in the user's polyaxon home dir (True) or in
    # the current working directory (False).
    IS_GLOBAL = False
    # When local, store under "./.polyaxon" instead of ".".
    IS_POLYAXON_DIR = False
    # Optional custom base path (only honored for global configs).
    CONFIG_PATH = None
    # File name of the managed config file.
    CONFIG_FILE_NAME = None
    # Config class used to (de)serialize the file content.
    CONFIG = None

    @staticmethod
    def _create_dir(dir_path):
        # Create the directory if missing; failures are logged, not raised.
        if not os.path.exists(dir_path):
            try:
                os.makedirs(dir_path)
            except OSError:
                # Except permission denied and potential race conditions
                # in multi-threaded environments.
                logger.error("Could not create config directory `%s`", dir_path)

    @classmethod
    def get_config_filepath(cls, create=True):
        """Return the config file path, creating parent dirs when `create`."""
        if not cls.IS_GLOBAL:
            # local to this directory
            base_path = os.path.join(".")
            if cls.IS_POLYAXON_DIR:
                # Add it to the current "./.polyaxon"
                base_path = os.path.join(base_path, ".polyaxon")
            if create:
                cls._create_dir(base_path)
        elif cls.CONFIG_PATH:  # Custom path
            base_path = cls.CONFIG_PATH
        else:
            base_path = polyaxon_user_path()
            if create:
                cls._create_dir(base_path)

        return os.path.join(base_path, cls.CONFIG_FILE_NAME)

    @classmethod
    def init_config(cls):
        # Write the current config back (no-op if the file already exists).
        config = cls.get_config()
        cls.set_config(config, init=True)

    @classmethod
    def is_initialized(cls):
        # True when the config file exists on disk.
        config_filepath = cls.get_config_filepath(False)
        return os.path.isfile(config_filepath)

    @classmethod
    def set_config(cls, config, init=False):
        """Serialize `config` to the file; with `init`, keep an existing file."""
        config_filepath = cls.get_config_filepath()

        if os.path.isfile(config_filepath) and init:
            logger.debug(
                "%s file already present at %s", cls.CONFIG_FILE_NAME, config_filepath
            )
            return

        with open(config_filepath, "w") as config_file:
            if hasattr(config, "to_dict"):
                logger.debug(
                    "Setting %s in the file %s", config.to_dict(), cls.CONFIG_FILE_NAME
                )
                config_file.write(ujson.dumps(config.to_dict()))
            elif isinstance(config, Mapping):
                config_file.write(ujson.dumps(config))
            else:
                logger.debug("Setting %s in the file %s", config, cls.CONFIG_FILE_NAME)
                config_file.write(config)

    @classmethod
    def get_config(cls):
        """Read and deserialize the config file; None when not initialized."""
        if not cls.is_initialized():
            return None

        config_filepath = cls.get_config_filepath()
        with open(config_filepath, "r") as config_file:
            config_str = config_file.read()
        # BaseConfig subclasses deserialize via from_dict, others via kwargs.
        if issubclass(cls.CONFIG, BaseConfig):
            return cls.CONFIG.from_dict(ujson.loads(config_str))
        return cls.CONFIG(**ujson.loads(config_str))

    @classmethod
    def get_config_or_default(cls):
        # Fall back to a default-constructed config when no file exists.
        if not cls.is_initialized():
            return cls.CONFIG()  # pylint:disable=not-callable
        return cls.get_config()

    @classmethod
    def get_config_from_env(cls, **kwargs):
        # Subclasses define how to build the config from environment values.
        raise NotImplementedError

    @classmethod
    def get_value(cls, key):
        """Return `key` from the stored config, or None when absent/uninitialized."""
        config = cls.get_config()
        if config:
            if hasattr(config, key):
                return getattr(config, key)
            else:
                logger.warning("Config `%s` has no key `%s`", cls.CONFIG.__name__, key)

        return None

    @classmethod
    def purge(cls):
        """Delete the config file if it exists."""
        config_filepath = cls.get_config_filepath()

        if not os.path.isfile(config_filepath):
            return

        os.remove(config_filepath)
|
gregmbi/polyaxon | core/polyaxon/polyboard/artifacts/kinds.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
class V1ArtifactKind(polyaxon_sdk.V1ArtifactKind):
    """Artifact kinds enriched with Django-style CHOICES and a VALUES lookup set."""

    # Single ordered source of truth; CHOICES and VALUES are derived from it
    # so the two collections can never drift apart.
    _KINDS = (
        polyaxon_sdk.V1ArtifactKind.MODEL,
        polyaxon_sdk.V1ArtifactKind.AUDIO,
        polyaxon_sdk.V1ArtifactKind.VIDEO,
        polyaxon_sdk.V1ArtifactKind.HISTOGRAM,
        polyaxon_sdk.V1ArtifactKind.IMAGE,
        polyaxon_sdk.V1ArtifactKind.TENSOR,
        polyaxon_sdk.V1ArtifactKind.DATAFRAME,
        polyaxon_sdk.V1ArtifactKind.CHART,
        polyaxon_sdk.V1ArtifactKind.CSV,
        polyaxon_sdk.V1ArtifactKind.TSV,
        polyaxon_sdk.V1ArtifactKind.PSV,
        polyaxon_sdk.V1ArtifactKind.SSV,
        polyaxon_sdk.V1ArtifactKind.METRIC,
        polyaxon_sdk.V1ArtifactKind.ENV,
        polyaxon_sdk.V1ArtifactKind.HTML,
        polyaxon_sdk.V1ArtifactKind.TEXT,
        polyaxon_sdk.V1ArtifactKind.FILE,
        polyaxon_sdk.V1ArtifactKind.DIR,
        polyaxon_sdk.V1ArtifactKind.DOCKERFILE,
        polyaxon_sdk.V1ArtifactKind.DOCKER_IMAGE,
        polyaxon_sdk.V1ArtifactKind.DATA,
        polyaxon_sdk.V1ArtifactKind.CODEREF,
        polyaxon_sdk.V1ArtifactKind.TABLE,
    )

    # (value, value) pairs in the same order as the original hand-written tuple.
    CHOICES = tuple((kind, kind) for kind in _KINDS)
    # Fast membership-test set over the same kinds.
    VALUES = set(_KINDS)
|
gregmbi/polyaxon | core/polyaxon/utils/enums_utils.py | from enum import Enum
from typing import Iterable, List, Set, Tuple, Type, Union
def enum_to_choices(enumeration: Type[Enum]) -> Iterable[Tuple]:
    """Map an Enum class to a tuple of (value, value) choice pairs, in member order."""
    choices = []
    for member in enumeration:
        choices.append((member.value, member.value))
    return tuple(choices)
def enum_to_set(enumeration: Type[Enum]) -> Set:
    """Collect the values of all members of an Enum class into a set."""
    return {member.value for member in enumeration}
def values_to_choices(enumeration: Union[List, Set]) -> Iterable[Tuple]:
    """Pair each raw value with itself, producing (value, value) choice tuples."""
    pairs = []
    for value in enumeration:
        pairs.append((value, value))
    return tuple(pairs)
|
gregmbi/polyaxon | core/tests/test_sidecar/test_monitor.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from tests.test_k8s.fixtures import (
status_run_job_event,
status_run_job_event_with_conditions,
)
from tests.utils import BaseTestCase
from polyaxon.env_vars.keys import POLYAXON_KEYS_RUN_INSTANCE
from polyaxon.exceptions import PolyaxonContainerException
from polyaxon.k8s.monitor import is_container_terminated
from polyaxon.sidecar import start_sidecar
class TestSidecar(BaseTestCase):
    """Tests for the k8s container-termination monitor and sidecar startup checks."""

    def test_is_container_terminated_no_status(self):
        # An empty container_statuses collection (list or dict) yields None.
        status = {"container_statuses": []}
        assert is_container_terminated(status, container_id="test") is None
        status = {"container_statuses": {}}
        assert is_container_terminated(status, container_id="test") is None

    def test_is_container_terminated(self):
        # Fixture without termination conditions yields None.
        assert (
            is_container_terminated(
                status_run_job_event["object"]["status"], container_id="test"
            )
            is None
        )
        # using wrong container id
        assert (
            is_container_terminated(
                status_run_job_event_with_conditions["object"]["status"],
                container_id="test",
            )
            is None
        )
        # using correct container id
        assert (
            is_container_terminated(
                status_run_job_event_with_conditions["object"]["status"],
                container_id="polyaxon-main-job",
            )["exit_code"]
            == 1
        )

    def test_monitor_raise_if_no_env_is_set(self):
        # An instance value not matching `owner.project.runs.<uuid>` must fail fast.
        os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "foo"
        with self.assertRaises(PolyaxonContainerException):
            start_sidecar(
                container_id="foo",
                sleep_interval=3,
                sync_interval=6,
                monitor_outputs=True,
                monitor_logs=False,
            )
        del os.environ[POLYAXON_KEYS_RUN_INSTANCE]

    def test_monitor_raise_if_no_pod_id(self):
        # A well-formed instance string still fails at startup
        # NOTE(review): presumably because no pod-id env var is set — confirm.
        os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "owner.project.runs.{}".format(
            uuid.uuid4().hex
        )
        with self.assertRaises(PolyaxonContainerException):
            start_sidecar(
                container_id="foo",
                sleep_interval=3,
                sync_interval=6,
                monitor_outputs=True,
                monitor_logs=False,
            )
        del os.environ[POLYAXON_KEYS_RUN_INSTANCE]
|
gregmbi/polyaxon | core/tests/test_tracking/test_events/test_event_values.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import numpy as np
import os
import tempfile
import pytest
from bokeh.plotting import figure
from PIL import Image
from plotly import figure_factory
from tests.utils import BaseTestCase
from polyaxon.tracking.events.events_processors import (
audio,
bokeh_chart,
convert_to_HWC,
histogram,
image,
image_boxes,
plotly_chart,
prepare_video,
video,
)
def tensor_np(shape, dtype=float):
    """Return an ndarray of the given shape filled with 0..N-1 in row-major order."""
    total = int(np.prod(shape))
    return np.arange(total, dtype=dtype).reshape(shape)
@pytest.mark.tracking_mark
class TestEventValues(BaseTestCase):
    """Tests for the event processors that materialize assets (images, video,
    audio, histograms, charts) and the event payloads they return."""

    def setUp(self):
        # Fresh temp dir per test; each test writes its asset(s) under it.
        self.run_path = tempfile.mkdtemp()
        self.asset_path = self.run_path + "/asset"

    def test_uint8_image(self):
        """Tests that uint8 image (pixel values in [0, 255]) is not changed"""
        assert os.path.exists(self.asset_path) is False
        event = image(
            asset_path=self.asset_path,
            data=tensor_np(shape=(3, 32, 32), dtype=np.uint8),
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_float32_image(self):
        """Tests that float32 image (pixel values in [0, 1]) are scaled correctly to [0, 255]"""
        assert os.path.exists(self.asset_path) is False
        event = image(asset_path=self.asset_path, data=tensor_np(shape=(3, 32, 32)))
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_float_1_converts_to_uint8_255(self):
        # A float image of 1.0 and a uint8 image of 255 must encode identically.
        assert os.path.exists(self.asset_path) is False
        green_uint8 = np.array([[[0, 255, 0]]], dtype="uint8")
        green_float32 = np.array([[[0, 1, 0]]], dtype="float32")
        a = image(asset_path=self.run_path + "/asset1", data=green_uint8)
        b = image(asset_path=self.run_path + "/asset2", data=green_float32)
        self.assertEqual(
            Image.open(io.BytesIO(open(a.path, "br").read())),
            Image.open(io.BytesIO(open(b.path, "br").read())),
        )

    def test_list_input(self):
        # histogram rejects plain Python lists.
        with pytest.raises(Exception):
            histogram("dummy", [1, 3, 4, 5, 6], "tensorflow")

    def test_empty_input(self):
        print("expect error here:")
        with pytest.raises(Exception):
            histogram("dummy", np.ndarray(0), "tensorflow")

    def test_image_with_boxes(self):
        event = image_boxes(
            asset_path=self.asset_path,
            tensor_image=tensor_np(shape=(3, 32, 32)),
            tensor_boxes=np.array([[10, 10, 40, 40]]),
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_image_with_one_channel(self):
        event = image(
            asset_path=self.asset_path,
            data=tensor_np(shape=(1, 8, 8)),
            dataformats="CHW",
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_image_with_four_channel(self):
        event = image(
            asset_path=self.asset_path,
            data=tensor_np(shape=(4, 8, 8)),
            dataformats="CHW",
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_image_with_one_channel_batched(self):
        event = image(
            asset_path=self.asset_path,
            data=tensor_np(shape=(2, 1, 8, 8)),
            dataformats="NCHW",
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_image_with_3_channel_batched(self):
        event = image(
            asset_path=self.asset_path,
            data=tensor_np(shape=(2, 3, 8, 8)),
            dataformats="NCHW",
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_image_with_four_channel_batched(self):
        event = image(
            asset_path=self.asset_path,
            data=tensor_np(shape=(2, 4, 8, 8)),
            dataformats="NCHW",
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_image_without_channel(self):
        event = image(
            asset_path=self.asset_path, data=tensor_np(shape=(8, 8)), dataformats="HW"
        )
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_video(self):
        asset_path = self.asset_path + ".gif"
        event = video(asset_path=asset_path, tensor=tensor_np(shape=(4, 3, 1, 8, 8)))
        assert event.path == asset_path
        assert os.path.exists(asset_path) is True

        event = video(
            asset_path=asset_path, tensor=tensor_np(shape=(16, 48, 1, 28, 28))
        )
        assert event.path == asset_path
        assert os.path.exists(asset_path) is True

        event = video(asset_path=asset_path, tensor=tensor_np(shape=(20, 7, 1, 8, 8)))
        assert event.path == asset_path
        assert os.path.exists(asset_path) is True

    def test_audio(self):
        event = audio(asset_path=self.asset_path, tensor=tensor_np(shape=(42,)))
        assert event.path == self.asset_path
        assert os.path.exists(self.asset_path) is True

    def test_histogram_auto(self):
        event = histogram(values=tensor_np(shape=(1024,)), bins="auto", max_bins=5)
        assert event.values is not None
        assert event.counts is not None

    def test_histogram(self):
        event = histogram(values=tensor_np(shape=(1024,)), bins="fd", max_bins=5)
        assert event.values is not None
        assert event.counts is not None

    def test_histogram_doane(self):
        event = histogram(tensor_np(shape=(1024,)), bins="doane", max_bins=5)
        assert event.values is not None
        assert event.counts is not None

    def test_to_HWC(self):  # noqa
        np.random.seed(1)
        # CHW -> HWC keeps the spatial dims and moves channels last.
        test_image = np.random.randint(0, 256, size=(3, 32, 32), dtype=np.uint8)
        converted = convert_to_HWC(test_image, "chw")
        assert converted.shape == (32, 32, 3)
        # NCHW batches are tiled into a single HWC image.
        test_image = np.random.randint(0, 256, size=(16, 3, 32, 32), dtype=np.uint8)
        converted = convert_to_HWC(test_image, "nchw")
        assert converted.shape == (64, 256, 3)
        # HW grayscale gains a replicated 3-channel axis.
        test_image = np.random.randint(0, 256, size=(32, 32), dtype=np.uint8)
        converted = convert_to_HWC(test_image, "hw")
        assert converted.shape == (32, 32, 3)

    def test_prepare_video(self):
        # at each timestep the sum over all other dimensions of the video should stay the same
        np.random.seed(1)
        video_before = np.random.random((4, 10, 3, 20, 20))
        video_after = prepare_video(np.copy(video_before))
        video_before = np.swapaxes(video_before, 0, 1)
        video_before = np.reshape(video_before, newshape=(10, -1))
        video_after = np.reshape(video_after, newshape=(10, -1))
        np.testing.assert_array_almost_equal(
            np.sum(video_before, axis=1), np.sum(video_after, axis=1)
        )

    def test_bokeh_chart(self):
        # prepare some data
        x = [1, 2, 3, 4, 5]
        y = [6, 7, 2, 4, 5]
        # create a new plot with a title and axis labels
        p = figure(title="simple line example", x_axis_label="x", y_axis_label="y")
        # add a line renderer with legend and line thickness
        p.line(x, y, line_width=2)
        # show the results
        event = bokeh_chart(p)
        assert isinstance(event.figure, dict)

    def test_plotly_chart(self):
        x1 = np.random.randn(200) - 2
        x2 = np.random.randn(200)
        x3 = np.random.randn(200) + 2
        hist_data = [x1, x2, x3]
        group_labels = ["Group 1", "Group 2", "Group 3"]
        p = figure_factory.create_distplot(
            hist_data, group_labels, bin_size=[0.1, 0.25, 0.5]
        )
        # show the results
        event = plotly_chart(p)
        assert isinstance(event.figure, dict)
|
gregmbi/polyaxon | core/polyaxon/tracking/contrib/fastai.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon import settings
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.tracking import Run
try:
from fastai.callbacks import TrackerCallback
except ImportError:
raise PolyaxonClientException("Fastai is required to use PolyaxonFastai")
class PolyaxonFastai(TrackerCallback):
    """Fastai TrackerCallback that logs recorder metrics to a Polyaxon run.

    Args:
        learn: fastai Learner to track.
        run: optional existing Run; created automatically in managed mode.
        monitor: metric name watched by TrackerCallback; None falls back to
            the TrackerCallback default.
        mode: TrackerCallback mode ("auto", "min" or "max").
    """

    def __init__(self, learn, run=None, monitor="auto", mode="auto"):
        if monitor is None:
            # Fix: the original first called super().__init__ with monitor=None
            # and then re-initialized; initialize once with the default instead.
            super().__init__(learn, mode=mode)
        else:
            super().__init__(learn, monitor=monitor, mode=mode)
        self.run = run
        if settings.CLIENT_CONFIG.is_managed:
            self.run = self.run or Run()

    def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
        # Fix: the original checked the nonexistent `self.experiment` attribute
        # (raising AttributeError every epoch); the run is stored on `self.run`.
        if not self.run:
            return
        # recorder.names[0] is the epoch column; slice it off and pair the
        # remaining names with their values.
        metrics = {
            name: stat
            for name, stat in list(
                zip(self.learn.recorder.names, [epoch, smooth_loss] + last_metrics)
            )[1:]
        }
        self.run.log_metrics(**metrics)
|
gregmbi/polyaxon | core/polyaxon/k8s/events.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_container_statuses_by_name(statuses):
    """Index raw container statuses by container name.

    Only the `ready` flag and `state` dict are kept for each container.
    """
    by_name = {}
    for entry in statuses:
        by_name[entry["name"]] = {
            "ready": entry["ready"],
            "state": entry["state"],
        }
    return by_name
def get_container_status(statuses, job_container_names):
    """Return the status of the first candidate container name found in `statuses`.

    Mirrors the original semantics: if no truthy status is found, the result
    of the last lookup (possibly None or a falsy value) is returned.
    """
    found = None
    for candidate in job_container_names:
        found = statuses.get(candidate)
        if found:
            break
    return found
|
gregmbi/polyaxon | core/tests/test_lifecycles/test_lifecycle.py | <filename>core/tests/test_lifecycles/test_lifecycle.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests.utils import BaseTestCase
from polyaxon.lifecycle import LifeCycle, V1Statuses
class TestStatusesTransition(BaseTestCase):
    """Checks the LifeCycle status grouping sets and classification predicates."""

    def test_values(self):
        assert len(LifeCycle.VALUES) == 16

    def test_warning_statuses(self):
        assert LifeCycle.WARNING_VALUES == {
            V1Statuses.WARNING,
            V1Statuses.UNSCHEDULABLE,
        }

    def test_pending_statuses(self):
        assert LifeCycle.PENDING_VALUES == {
            V1Statuses.CREATED,
            V1Statuses.RESUMING,
            V1Statuses.SCHEDULED,
        }

    def test_running_statuses(self):
        assert LifeCycle.RUNNING_VALUES == {
            V1Statuses.STARTING,
            V1Statuses.RUNNING,
        }

    def test_done_statuses(self):
        assert LifeCycle.DONE_VALUES == {
            V1Statuses.FAILED,
            V1Statuses.UPSTREAM_FAILED,
            V1Statuses.SUCCEEDED,
            V1Statuses.STOPPED,
            V1Statuses.SKIPPED,
        }

    def test_can_check_heartbeat(self):
        assert LifeCycle.can_check_heartbeat(None) is False
        for status in LifeCycle.VALUES:
            if LifeCycle.is_running(status):
                assert LifeCycle.can_check_heartbeat(status) is True
            else:
                assert LifeCycle.can_check_heartbeat(status) is False

    def test_is_unschedulable(self):
        assert LifeCycle.is_unschedulable(None) is False
        for status in LifeCycle.VALUES:
            if status == V1Statuses.UNSCHEDULABLE:
                assert LifeCycle.is_unschedulable(status) is True
            else:
                assert LifeCycle.is_unschedulable(status) is False

    def test_is_warning(self):
        assert LifeCycle.is_warning(None) is False
        for status in LifeCycle.VALUES:
            if status in LifeCycle.WARNING_VALUES:
                assert LifeCycle.is_warning(status) is True
            else:
                assert LifeCycle.is_warning(status) is False

    def test_is_pending(self):
        assert LifeCycle.is_pending(None) is False
        for status in LifeCycle.VALUES:
            if status in LifeCycle.PENDING_VALUES:
                assert LifeCycle.is_pending(status) is True
            else:
                assert LifeCycle.is_pending(status) is False

    def test_is_starting(self):
        assert LifeCycle.is_starting(None) is False
        for status in LifeCycle.VALUES:
            if status == V1Statuses.STARTING:
                assert LifeCycle.is_starting(status) is True
            else:
                assert LifeCycle.is_starting(status) is False

    def test_is_running(self):
        assert LifeCycle.is_running(None) is False
        for status in LifeCycle.VALUES:
            if status in LifeCycle.RUNNING_VALUES:
                assert LifeCycle.is_running(status) is True
            else:
                assert LifeCycle.is_running(status) is False

    def test_is_unknown(self):
        assert LifeCycle.is_unknown(None) is False
        for status in LifeCycle.VALUES:
            if status == V1Statuses.UNKNOWN:
                assert LifeCycle.is_unknown(status) is True
            else:
                assert LifeCycle.is_unknown(status) is False

    def test_is_k8s_stoppable(self):
        assert LifeCycle.is_k8s_stoppable(None) is False
        for status in LifeCycle.VALUES:
            # k8s-stoppable = running, unschedulable, warning, or unknown.
            cond = (
                LifeCycle.is_running(status)
                or LifeCycle.is_unschedulable(status)
                or LifeCycle.is_warning(status=status)
                or LifeCycle.is_unknown(status=status)
            )
            if cond:
                assert LifeCycle.is_k8s_stoppable(status) is True
            else:
                assert LifeCycle.is_k8s_stoppable(status) is False

    def test_is_stopping(self):
        # Fix: this test was named `is_stopping` (no `test_` prefix), so the
        # test runner silently skipped it.
        assert LifeCycle.is_stopping(None) is False
        for status in LifeCycle.VALUES:
            if status == V1Statuses.STOPPING:
                assert LifeCycle.is_stopping(status) is True
            else:
                assert LifeCycle.is_stopping(status) is False

    def test_is_stoppable(self):
        assert LifeCycle.is_stoppable(None) is True
        for status in LifeCycle.VALUES:
            if not LifeCycle.is_done(status):
                assert LifeCycle.is_stoppable(status) is True
            else:
                assert LifeCycle.is_stoppable(status) is False

    def test_is_done(self):
        assert LifeCycle.is_done(None) is False
        for status in LifeCycle.VALUES:
            if status in LifeCycle.DONE_VALUES:
                assert LifeCycle.is_done(status) is True
            else:
                assert LifeCycle.is_done(status) is False

    def test_succeeded(self):
        assert LifeCycle.succeeded(None) is False
        for status in LifeCycle.VALUES:
            if status == V1Statuses.SUCCEEDED:
                assert LifeCycle.succeeded(status) is True
            else:
                assert LifeCycle.succeeded(status) is False

    def test_failed(self):
        assert LifeCycle.failed(None) is False
        for status in LifeCycle.VALUES:
            if status in {V1Statuses.FAILED, V1Statuses.UPSTREAM_FAILED}:
                assert LifeCycle.failed(status) is True
            else:
                assert LifeCycle.failed(status) is False

    def test_skipped(self):
        assert LifeCycle.skipped(None) is False
        for status in LifeCycle.VALUES:
            if status == V1Statuses.SKIPPED:
                assert LifeCycle.skipped(status) is True
            else:
                assert LifeCycle.skipped(status) is False
gregmbi/polyaxon | core/polyaxon/stores/polyaxon_store.py | <reponame>gregmbi/polyaxon<filename>core/polyaxon/stores/polyaxon_store.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
from polyaxon import settings
from polyaxon.client.transport.utils import progress_bar
from polyaxon.exceptions import (
HTTP_ERROR_MESSAGES_MAPPING,
PolyaxonClientException,
PolyaxonShouldExitError,
)
from polyaxon.logger import logger
from polyaxon.stores.base_store import StoreMixin
from polyaxon.utils.path_utils import check_or_create_path, get_path, untar_file
class PolyaxonStore(StoreMixin):
    """
    Polyaxon filesystem store.

    Used to download data from Polyaxon streams apis.

    By default this store requires a valid run.
    """

    URL = "/streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/artifact"

    def __init__(self, client: "RunClient"):  # noqa
        # The run client supplies auth config and artifact-tree access.
        self._client = client

    def ls(self, path):
        """Alias for `list`."""
        return self.list(path=path)

    def list(self, path):
        """Return the artifacts tree under `path` for the client's run."""
        return self._client.get_artifacts_tree(path=path)

    def _get_headers(self, headers=None):
        """Merge authentication and service headers from the client config."""
        request_headers = headers or {}
        # Auth headers if access_token is present
        if self._client.client.config:
            config = self._client.client.config
            if "Authorization" not in request_headers and config.token:
                request_headers.update(
                    {
                        "Authorization": "{} {}".format(
                            config.authentication_type, config.token
                        )
                    }
                )
            if config.header and config.header_service:
                request_headers.update({config.header: config.header_service})
        return request_headers

    @staticmethod
    def check_response_status(response, endpoint):
        """Check if response is successful. Else raise Exception."""
        if 200 <= response.status_code < 300:
            return response
        try:
            logger.error(
                "Request to %s failed with status code %s. \n" "Reason: %s",
                endpoint,
                response.status_code,
                response.text,
            )
        except TypeError:
            # response.text may not be log-formattable; still report the failure.
            logger.error("Request to %s failed with status code", endpoint)
        raise PolyaxonClientException(
            HTTP_ERROR_MESSAGES_MAPPING.get(response.status_code)
        )

    def download(
        self,
        url,
        filename,
        params=None,
        headers=None,
        timeout=None,
        session=None,
        untar=False,
        delete_tar=True,
        extract_path=None,
    ):
        """
        Download the file from the given url at the current path

        Streams the response in 1024-byte chunks into `filename`, optionally
        untarring the result. Raises PolyaxonShouldExitError on any
        connection/HTTP error, PolyaxonClientException on non-2xx responses.
        """
        # pylint:disable=too-many-branches
        logger.debug("Downloading files from url: %s", url)
        request_headers = self._get_headers(headers=headers)
        timeout = timeout if timeout is not None else settings.LONG_REQUEST_TIMEOUT
        session = session or requests.Session()
        try:
            response = session.get(
                url,
                params=params,
                headers=request_headers,
                timeout=timeout,
                stream=True,
            )
            self.check_response_status(response, url)
            with open(filename, "wb") as f:
                # chunk mode response doesn't have content-length so we are
                # using a custom header here
                content_length = response.headers.get("x-polyaxon-content-length")
                if not content_length:
                    content_length = response.headers.get("content-length")
                if content_length:
                    # Known size: show a progress bar while writing.
                    for chunk in progress_bar(
                        response.iter_content(chunk_size=1024),
                        expected_size=(int(content_length) / 1024) + 1,
                    ):
                        if chunk:
                            f.write(chunk)
                else:
                    for chunk in response.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)
            if untar:
                # untar_file returns the extraction target, which replaces the
                # tarball path as the returned filename.
                filename = untar_file(
                    filename=filename, delete_tar=delete_tar, extract_path=extract_path
                )
            return filename
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.RequestException,
            requests.exceptions.Timeout,
            requests.exceptions.HTTPError,
        ) as exception:
            try:
                logger.debug("Exception: %s", exception)
            except TypeError:
                pass
            raise PolyaxonShouldExitError(
                "Error connecting to Polyaxon server on `{}`.\n"
                "An Error `{}` occurred.\n"
                "Check your host and ports configuration "
                "and your internet connection.".format(url, exception)
            )

    def download_file(self, url, path, **kwargs):
        """Download a single artifact (or tarball when untar=True) under the
        archive root, skipping files that already exist locally.

        Returns the local archive root for the run (not the file path itself).
        """
        local_path = get_path(
            settings.CLIENT_CONFIG.archive_root, self._client.run_uuid,
        )
        _local_path = local_path
        if path:
            _local_path = get_path(local_path, path)
        if kwargs.get("untar"):
            # Fetch the archive alongside the eventual extraction target.
            _local_path = _local_path + ".tar.gz"
        check_or_create_path(_local_path, is_dir=False)
        if not os.path.exists(_local_path):
            self.download(
                filename=_local_path, params={"path": path}, url=url, **kwargs
            )
        return local_path

    def upload_file(
        self, dirname, path_to, use_basename=True, workers=0, last_time=None
    ):
        # No-op: uploads are not supported by this streams-based store.
        pass

    def upload_dir(
        self, dirname, path_to, use_basename=True, workers=0, last_time=None
    ):
        # No-op: uploads are not supported by this streams-based store.
        pass

    def download_dir(
        self, path_from, local_path, use_basename=True, workers=0, **kwargs
    ):
        # No-op: directory downloads are not implemented for this store.
        pass

    def delete(self, path, **kwargs):
        # No-op: deletion is not supported by this streams-based store.
        pass
|
gregmbi/polyaxon | core/polyaxon/schemas/types/dockerfile.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validates_schema
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.docker_image import validate_image
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
POLYAXON_DOCKERFILE_NAME = "Dockerfile"
POLYAXON_DOCKER_WORKDIR = "/code"
POLYAXON_DOCKER_SHELL = "/bin/bash"
class DockerfileTypeSchema(BaseCamelSchema):
    """Marshmallow schema validating the dockerfile specification type.

    Every field is wrapped in RefOrObject so values may be provided either
    inline or as references resolved later.
    """

    image = RefOrObject(fields.Str(), required=True)
    env = RefOrObject(fields.Dict(keys=fields.Str(), allow_none=True))
    path = RefOrObject(fields.List(fields.Str(), allow_none=True))
    copy = RefOrObject(fields.List(fields.Str(), allow_none=True))
    run = RefOrObject(fields.List(fields.Str(), allow_none=True))
    lang_env = RefOrObject(fields.Str(allow_none=True))
    uid = RefOrObject(fields.Int(allow_none=True))
    gid = RefOrObject(fields.Int(allow_none=True))
    filename = RefOrObject(fields.Str(allow_none=True))
    workdir = RefOrObject(fields.Str(allow_none=True))
    workdir_path = RefOrObject(fields.Str(allow_none=True))
    shell = RefOrObject(fields.Str(allow_none=True))

    @staticmethod
    def schema_config():
        # Binds this schema to its config class for deserialization.
        return V1DockerfileType

    @validates_schema
    def validate_dockerfile(self, data, **kwargs):
        # The image reference must be a well-formed docker image name.
        validate_image(data.get("image"))
class V1DockerfileType(BaseConfig, polyaxon_sdk.V1DockerfileType):
    """Dockerfile specification type.

    The filename, workdir and shell properties fall back to Polyaxon's
    defaults when no explicit value was set.
    """

    IDENTIFIER = "dockerfile"
    SCHEMA = DockerfileTypeSchema
    REDUCED_ATTRIBUTES = [
        "image",
        "env",
        "path",
        "copy",
        "run",
        "langEnv",
        "uid",
        "gid",
        "filename",
        "workdir",
        "workdirPath",
        "shell",
    ]

    @property
    def filename(self):
        """Dockerfile name, defaulting to POLYAXON_DOCKERFILE_NAME."""
        if self._filename is None:
            return POLYAXON_DOCKERFILE_NAME
        return self._filename

    @filename.setter
    def filename(self, filename):
        self._filename = filename

    @property
    def workdir(self):
        """Working directory inside the image, defaulting to POLYAXON_DOCKER_WORKDIR."""
        if self._workdir is None:
            return POLYAXON_DOCKER_WORKDIR
        return self._workdir

    @workdir.setter
    def workdir(self, workdir):
        self._workdir = workdir

    @property
    def shell(self):
        """Shell binary for RUN steps, defaulting to POLYAXON_DOCKER_SHELL."""
        if self._shell is None:
            return POLYAXON_DOCKER_SHELL
        return self._shell

    @shell.setter
    def shell(self, shell):
        self._shell = shell

    @property
    def image_tag(self):
        """Tag portion of the configured image, or None when no image is set."""
        if not self.image:
            return None
        segments = self.image.split(":")
        if len(segments) == 1:
            # Bare image name, no tag.
            return "latest"
        if len(segments) == 2:
            # Either image:tag, or registry:port/image with no tag.
            return "latest" if "/" in segments[-1] else segments[-1]
        if len(segments) == 3:
            # registry:port/image:tag
            return segments[-1]
        # More than three colon-separated segments: same implicit None as before.
        return None
gregmbi/polyaxon | core/polyaxon/tracking/events/events_processors.py | <filename>core/polyaxon/tracking/events/events_processors.py<gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import shutil
from typing import Dict, List
from polyaxon.constants import UNKNOWN
from polyaxon.logger import logger
from polyaxon.polyboard.artifacts import V1ArtifactKind
from polyaxon.polyboard.events import (
LoggedEventSpec,
V1Event,
V1EventArtifact,
V1EventAudio,
V1EventChart,
V1EventChartKind,
V1EventDataframe,
V1EventHistogram,
V1EventImage,
V1EventModel,
V1EventVideo,
)
from polyaxon.utils.np_utils import calculate_scale_factor, to_np
from polyaxon.utils.path_utils import check_or_create_path, module_type
try:
import numpy as np
except ImportError:
np = None
NUMPY_ERROR_MESSAGE = "numpy is required for this tracking operation!"
PIL_ERROR_MESSAGE = "PIL/Pillow is required for this tracking operation!"
MOVIEPY_ERROR_MESSAGE = "moviepy is required for this tracking operation!"
MATPLOTLIB_ERROR_MESSAGE = "matplotlib is required for this tracking operation!"
PLOTLY_ERROR_MESSAGE = "plotly is required for this tracking operation!"
BOKEH_ERROR_MESSAGE = "bokeh is required for this tracking operation!"
def dataframe_path(
    from_path: str, asset_path: str, content_type: str = None
) -> V1EventDataframe:
    """Copy a dataframe artifact into the run's assets and return its event."""
    check_or_create_path(asset_path, is_dir=False)
    shutil.copy(from_path, asset_path)
    event = V1EventDataframe(path=asset_path, content_type=content_type)
    return event
def model_path(
    from_path: str, asset_path: str, framework: str = None, spec: Dict = None
) -> V1EventModel:
    """Copy a model file or directory into the assets location and wrap it in an event."""
    check_or_create_path(asset_path, is_dir=False)
    # Files are copied directly; directories are copied recursively.
    copier = shutil.copy if os.path.isfile(from_path) else shutil.copytree
    copier(from_path, asset_path)
    return V1EventModel(path=asset_path, framework=framework, spec=spec)
def artifact_path(from_path: str, asset_path: str, kind: str) -> V1EventArtifact:
    """Copy a generic artifact into the assets location and return its event."""
    check_or_create_path(asset_path, is_dir=False)
    shutil.copy(from_path, asset_path)
    event = V1EventArtifact(kind=kind, path=asset_path)
    return event
def image_path(from_path: str, asset_path: str) -> V1EventImage:
    """Copy an image file into the assets location and return its event."""
    check_or_create_path(asset_path, is_dir=False)
    shutil.copy(from_path, asset_path)
    event = V1EventImage(path=asset_path)
    return event
def video_path(from_path: str, asset_path: str, content_type=None) -> V1EventVideo:
    """Copy a video file into the assets location and return its event."""
    check_or_create_path(asset_path, is_dir=False)
    shutil.copy(from_path, asset_path)
    event = V1EventVideo(path=asset_path, content_type=content_type)
    return event
def audio_path(from_path: str, asset_path: str, content_type=None) -> V1EventAudio:
    """Copy an audio file under `asset_path` and return its event."""
    check_or_create_path(asset_path, is_dir=False)
    shutil.copy(from_path, asset_path)
    return V1EventAudio(path=asset_path, content_type=content_type)
def _draw_single_box(
    image,
    xmin,
    ymin,
    xmax,
    ymax,
    display_str,
    color="black",
    color_text="black",
    thickness=2,
):
    """Draw one bounding box (and an optional text label) onto a PIL image.

    Returns the image, or UNKNOWN when numpy or PIL is unavailable.
    """
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    try:
        from PIL import ImageDraw, ImageFont
    except ImportError:
        logger.warning(PIL_ERROR_MESSAGE)
        return UNKNOWN
    font = ImageFont.load_default()
    draw = ImageDraw.Draw(image)
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    # Box outline drawn as a closed polyline.
    draw.line(
        [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
        width=thickness,
        fill=color,
    )
    if display_str:
        text_bottom = bottom
        # Reverse list and print from bottom to top.
        text_width, text_height = font.getsize(display_str)
        margin = np.ceil(0.05 * text_height)
        # Filled background behind the label so the text stays readable.
        draw.rectangle(
            [
                (left, text_bottom - text_height - 2 * margin),
                (left + text_width, text_bottom),
            ],
            fill=color,
        )
        draw.text(
            (left + margin, text_bottom - text_height - margin),
            display_str,
            fill=color_text,
            font=font,
        )
    return image
def metric(value):
    """Coerce `value` to a float metric.

    Plain floats pass through untouched; anything else (numpy scalars,
    0-d arrays/tensors, ...) is converted via `to_np` and must squeeze
    to a 0-dimensional value.
    """
    if isinstance(value, float):
        return value
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    as_array = to_np(value)
    assert as_array.squeeze().ndim == 0, "scalar should be 0D"
    return float(as_array)
def histogram(values, bins, max_bins=None):
    """Build a histogram event from raw sample values.

    `bins` follows numpy.histogram semantics; when `max_bins` is given, the
    counts are subsampled (adjacent bins summed in groups) down to at most
    max_bins bins.
    """
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    values = to_np(values).astype(float)
    if values.size == 0:
        raise ValueError("The input has no element.")
    values = values.reshape(-1)
    # NOTE(review): the bin edges (`limits`) are discarded and the raw sample
    # array is returned as `values` below — confirm V1EventHistogram expects
    # raw samples rather than bin edges here.
    counts, limits = np.histogram(values, bins=bins)
    num_bins = len(counts)
    if max_bins is not None and num_bins > max_bins:
        subsampling = num_bins // max_bins
        subsampling_remainder = num_bins % subsampling
        if subsampling_remainder != 0:
            # Pad so the counts split evenly into groups of `subsampling`.
            counts = np.pad(
                counts,
                pad_width=[[0, subsampling - subsampling_remainder]],
                mode="constant",
                constant_values=0,
            )
        # Merge each group of `subsampling` adjacent bins into one bin.
        counts = counts.reshape(-1, subsampling).sum(axis=-1)
    if counts.size == 0:
        logger.warning("Tracking an empty histogram")
        return UNKNOWN
    return V1EventHistogram(values=values, counts=counts)
def np_histogram(values, counts):
    """Build a histogram event from precomputed values/counts (numpy.histogram style)."""
    return V1EventHistogram(values=values, counts=counts)
def encoded_image(asset_path, data):
    """Decode an already-encoded image payload and save it as an image event.

    `data` is expected to expose encoded_image_string/height/width/colorspace
    (e.g. a TF image summary proto).
    """
    try:
        from PIL import Image
    except ImportError:
        logger.warning(PIL_ERROR_MESSAGE)
        return UNKNOWN
    image_data = Image.open(io.BytesIO(data.encoded_image_string))
    return save_image(
        asset_path=asset_path,
        image_data=image_data,
        height=data.height,
        width=data.width,
        colorspace=data.colorspace,
    )
def image(asset_path, data, rescale=1, dataformats="CHW"):
    """Convert an image tensor (layout given by `dataformats`) and save it as an event."""
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    tensor = to_np(data)
    tensor = convert_to_HWC(tensor, dataformats)
    # Do not assume that user passes in values in [0, 255], use data type to detect
    scale_factor = calculate_scale_factor(tensor)
    tensor = tensor.astype(np.float32)
    tensor = (tensor * scale_factor).astype(np.uint8)
    return make_image(asset_path, tensor, rescale=rescale)
def image_boxes(asset_path, tensor_image, tensor_boxes, rescale=1, dataformats="CHW"):
    """Save an image event with bounding boxes (xyxy rows of `tensor_boxes`) drawn on it."""
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    tensor_image = to_np(tensor_image)
    tensor_image = convert_to_HWC(tensor_image, dataformats)
    tensor_boxes = to_np(tensor_boxes)
    # Scale pixel values to [0, 255] based on the input dtype.
    tensor_image = tensor_image.astype(np.float32) * calculate_scale_factor(
        tensor_image
    )
    return make_image(
        asset_path, tensor_image.astype(np.uint8), rescale=rescale, rois=tensor_boxes
    )
def draw_boxes(disp_image, boxes):
    """Draw every box (xyxy format, one per row of `boxes`) onto `disp_image` in red."""
    for row in range(boxes.shape[0]):
        disp_image = _draw_single_box(
            disp_image,
            boxes[row, 0],
            boxes[row, 1],
            boxes[row, 2],
            boxes[row, 3],
            display_str=None,
            color="Red",
        )
    return disp_image
def make_image(asset_path, tensor, rescale=1, rois=None):
    """Render an HWC uint8 tensor (optionally with ROI boxes) to a PNG image event.

    Returns UNKNOWN when PIL is unavailable.
    """
    try:
        from PIL import Image
    except ImportError:
        logger.warning(PIL_ERROR_MESSAGE)
        return UNKNOWN
    height, width, colorspace = tensor.shape
    scaled_height = int(height * rescale)
    scaled_width = int(width * rescale)
    image_data = Image.fromarray(tensor)
    if rois is not None:
        image_data = draw_boxes(image_data, rois)
    # Fix: Image.ANTIALIAS was an alias removed in Pillow 10; Image.LANCZOS is
    # the same resampling filter and exists in every supported Pillow version.
    image_data = image_data.resize((scaled_width, scaled_height), Image.LANCZOS)
    return save_image(
        asset_path=asset_path,
        image_data=image_data,
        height=height,
        width=width,
        colorspace=colorspace,
    )
def save_image(asset_path, image_data, height, width, colorspace):
    """Write a PIL image to `asset_path` as PNG and return the image event."""
    check_or_create_path(asset_path, is_dir=False)
    image_data.save(asset_path, format="PNG")
    return V1EventImage(
        height=height, width=width, colorspace=colorspace, path=asset_path
    )
def video(asset_path, tensor, fps=4, content_type="gif"):
    """Convert a (B, T, C, H, W) tensor into a tiled-grid video event."""
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    tensor = to_np(tensor)
    tensor = prepare_video(tensor)
    # If user passes in uint8, then we don't need to rescale by 255
    scale_factor = calculate_scale_factor(tensor)
    tensor = tensor.astype(np.float32)
    tensor = (tensor * scale_factor).astype(np.uint8)
    return make_video(asset_path, tensor, fps, content_type)
def make_video(asset_path, tensor, fps, content_type="gif"):
    """Encode a (T, H, W, C) uint8 tensor as a gif/video file and return its event.

    Returns UNKNOWN when moviepy (or one of its runtime dependencies) is
    unavailable.
    """
    try:
        import moviepy  # noqa: F401
    except ImportError:
        logger.warning(MOVIEPY_ERROR_MESSAGE)
        return UNKNOWN
    try:
        from moviepy import editor as mpy
    except ImportError:
        # Fix: the previous call passed two positional args to logger.warning,
        # which treats the second as a %-format argument and drops it from the
        # message (and errors during formatting since the format has no fields).
        logger.warning(
            "moviepy is installed, but can't import moviepy.editor. "
            "Some packages could be missing [imageio, requests]"
        )
        # Fix: return UNKNOWN (not None) to stay consistent with the other
        # processors' missing-dependency behavior.
        return UNKNOWN
    t, h, w, c = tensor.shape
    # encode sequence of images into gif string
    clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
    check_or_create_path(asset_path, is_dir=False)
    try:  # older version of moviepy accepts a progress_bar flag
        if content_type == "gif":
            clip.write_gif(asset_path, verbose=False, progress_bar=False)
        else:
            clip.write_videofile(asset_path, verbose=False, progress_bar=False)
    except TypeError:
        if content_type == "gif":
            clip.write_gif(asset_path, verbose=False)
        else:
            clip.write_videofile(asset_path, verbose=False)
    return V1EventVideo(
        height=h, width=w, colorspace=c, path=asset_path, content_type=content_type
    )
def audio(asset_path, tensor, sample_rate=44100):
    """Save a 1-D waveform tensor as a 16-bit mono WAV file and return its event.

    Amplitudes are expected in [-1, 1]; out-of-range values are clipped
    (with a warning).
    """
    import struct
    import wave

    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    tensor = to_np(tensor)
    tensor = tensor.squeeze()
    if abs(tensor).max() > 1:
        # Fix: use the module logger instead of a bare print, consistent with
        # the other processors.
        logger.warning("Audio amplitude out of range, auto clipped.")
        tensor = tensor.clip(-1, 1)
    assert tensor.ndim == 1, "input tensor should be 1 dimensional."
    # Scale [-1, 1] floats to signed 16-bit PCM samples.
    tensor_list = [int(32767.0 * x) for x in tensor]
    check_or_create_path(asset_path, is_dir=False)
    # Fix: context manager guarantees the file handle is closed on error;
    # bytes join avoids the quadratic += accumulation.
    with wave.open(asset_path, "wb") as wave_write:
        wave_write.setnchannels(1)
        wave_write.setsampwidth(2)  # 2 bytes per sample -> 16-bit PCM
        wave_write.setframerate(sample_rate)
        tensor_enc = b"".join(struct.pack("<h", v) for v in tensor_list)
        wave_write.writeframes(tensor_enc)
    return V1EventAudio(
        sample_rate=sample_rate,
        num_channels=1,
        length_frames=len(tensor_list),
        path=asset_path,
        content_type="audio/wav",
    )
# https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py
def compute_curve(labels, predictions, num_thresholds=None, weights=None):
    """Compute PR-curve buckets for binary labels/prediction scores.

    Returns a (6, num_thresholds) array stacking, per threshold bucket:
    [tp, fp, tn, fn, precision, recall].
    """
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    _MINIMUM_COUNT = 1e-7
    if weights is None:
        weights = 1.0
    # Compute bins of true positives and false positives.
    bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
    # Fix: np.float was deprecated in NumPy 1.20 and removed later; the builtin
    # float is the documented replacement (same float64 dtype).
    float_labels = labels.astype(float)
    histogram_range = (0, num_thresholds - 1)
    tp_buckets, _ = np.histogram(
        bucket_indices,
        bins=num_thresholds,
        range=histogram_range,
        weights=float_labels * weights,
    )
    fp_buckets, _ = np.histogram(
        bucket_indices,
        bins=num_thresholds,
        range=histogram_range,
        weights=(1.0 - float_labels) * weights,
    )
    # Obtain the reverse cumulative sum.
    tp = np.cumsum(tp_buckets[::-1])[::-1]
    fp = np.cumsum(fp_buckets[::-1])[::-1]
    tn = fp[0] - fp
    fn = tp[0] - tp
    # _MINIMUM_COUNT guards against division by zero in empty buckets.
    precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp)
    recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn)
    return np.stack((tp, fp, tn, fn, precision, recall))
def figure_to_image(figure, close=True):
    """Render matplotlib figure to numpy format.

    Returns:
        numpy.array: image in [CHW] order, or UNKNOWN when numpy/matplotlib
        is unavailable.
    """
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    try:
        import matplotlib.pyplot as plt
        import matplotlib.backends.backend_agg as plt_backend_agg
    except ImportError:
        logger.warning(MATPLOTLIB_ERROR_MESSAGE)
        # Fix: without this return, execution fell through and raised a
        # NameError on plt_backend_agg whenever matplotlib is missing.
        return UNKNOWN
    canvas = plt_backend_agg.FigureCanvasAgg(figure)
    canvas.draw()
    data = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
    w, h = figure.canvas.get_width_height()
    # Drop the alpha channel and move to channels-first layout.
    image_hwc = data.reshape([h, w, 4])[:, :, 0:3]
    image_chw = np.moveaxis(image_hwc, source=2, destination=0)
    if close:
        plt.close(figure)
    return image_chw
def figures_to_images(figures, close=True):
    """Render several matplotlib figures into one stacked numpy array.

    Returns:
        numpy.array: images stacked along a new leading axis, each in [CHW] order
    """
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    rendered = []
    for fig in figures:
        rendered.append(figure_to_image(fig, close=close))
    return np.stack(rendered)
def ensure_matplotlib_figure(figure):
    """Extract the current figure from a matplotlib object or return the object if it's a figure.

    raises ValueError if the object can't be converted.
    """
    import matplotlib
    from matplotlib.figure import Figure
    # Passing the pyplot module itself means "use the current figure".
    if figure == matplotlib.pyplot:
        figure = figure.gcf()
    elif not isinstance(figure, Figure):
        if hasattr(figure, "figure"):
            figure = figure.figure
            # Some matplotlib objects have a figure function
            if not isinstance(figure, Figure):
                raise ValueError(
                    "Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
                )
    # Refuse to log a figure with no plotted data.
    if not figure.gca().has_data():
        raise ValueError(
            "You attempted to log an empty plot, "
            "pass a figure directly or ensure the global plot isn't closed."
        )
    return figure
def prepare_video(data):
    """
    Converts a 5D tensor [batchsize, time(frame), channel(color), height, width]
    into 4D tensor with dimension [time(frame), new_width, new_height, channel].
    A batch of images are spreaded to a grid, which forms a frame.
    e.g. Video with batchsize 16 will have a 4x4 grid.
    """
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    b, t, c, h, w = data.shape
    # uint8 inputs are normalized to [0, 1] floats before tiling.
    if data.dtype == np.uint8:
        data = np.float32(data) / 255.0
    def is_power2(num):
        return num != 0 and ((num & (num - 1)) == 0)
    # pad to nearest power of 2, all at once
    if not is_power2(data.shape[0]):
        len_addition = int(2 ** data.shape[0].bit_length() - data.shape[0])
        data = np.concatenate(
            (data, np.zeros(shape=(len_addition, t, c, h, w))), axis=0
        )
    # Grid layout derived from the original batch size b; the padded batch
    # (a power of two) splits exactly into n_rows * n_cols.
    n_rows = 2 ** ((b.bit_length() - 1) // 2)
    n_cols = data.shape[0] // n_rows
    data = np.reshape(data, newshape=(n_rows, n_cols, t, c, h, w))
    # Move time to the front and interleave rows/cols into spatial dims.
    data = np.transpose(data, axes=(2, 0, 4, 1, 5, 3))
    return np.reshape(data, newshape=(t, n_rows * h, n_cols * w, c))
def make_grid(data, ncols=8):
    """Tile a batch of images (N1HW or N3HW) into one channels-first grid image."""
    # I: N1HW or N3HW
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    assert isinstance(data, np.ndarray), "plugin error, should pass numpy array here"
    if data.shape[1] == 1:
        # Grayscale: replicate the channel to get a 3-channel image.
        data = np.concatenate([data, data, data], 1)
    # Fix: the previous assert parsed as `(ndim == 4 and C == 3) or C == 4`,
    # letting non-4D inputs through whenever shape[1] happened to be 4.
    assert data.ndim == 4 and data.shape[1] in (3, 4)
    nimg = data.shape[0]
    H = data.shape[2]  # noqa
    W = data.shape[3]  # noqa
    ncols = min(nimg, ncols)
    nrows = int(np.ceil(float(nimg) / ncols))
    canvas = np.zeros((data.shape[1], H * nrows, W * ncols))
    i = 0
    for y in range(nrows):
        for x in range(ncols):
            if i >= nimg:
                break
            canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = data[i]  # noqa
            i = i + 1
    return canvas
def convert_to_HWC(tensor, input_format):  # noqa
    """Normalize a tensor described by `input_format` (e.g. "NCHW", "HWC", "HW") to HWC.

    4D batches are tiled into a single grid image; grayscale channels are
    replicated to RGB.
    """
    if not np:
        logger.warning(NUMPY_ERROR_MESSAGE)
        return UNKNOWN
    assert len(set(input_format)) == len(
        input_format
    ), "You can not use the same dimension shordhand twice. \
        input_format: {}".format(
        input_format
    )
    assert len(tensor.shape) == len(
        input_format
    ), "size of input tensor and input format are different. \
        tensor shape: {}, input_format: {}".format(
        tensor.shape, input_format
    )
    input_format = input_format.upper()
    if len(input_format) == 4:
        # Batch of images: reorder to NCHW, tile into one grid, then to HWC.
        index = [input_format.find(c) for c in "NCHW"]
        tensor_NCHW = tensor.transpose(index)  # noqa
        tensor_CHW = make_grid(tensor_NCHW)  # noqa
        return tensor_CHW.transpose(1, 2, 0)
    if len(input_format) == 3:
        index = [input_format.find(c) for c in "HWC"]
        tensor_HWC = tensor.transpose(index)  # noqa
        if tensor_HWC.shape[2] == 1:
            tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2)  # noqa
        return tensor_HWC
    if len(input_format) == 2:
        # Single-channel 2D image: replicate to 3 channels.
        index = [input_format.find(c) for c in "HW"]
        tensor = tensor.transpose(index)
        tensor = np.stack([tensor, tensor, tensor], 2)
        return tensor
def bokeh_chart(figure) -> V1EventChart:
    """Serialize a bokeh figure into a chart event (UNKNOWN when bokeh is missing)."""
    try:
        from bokeh.embed import json_item
    except ImportError:
        logger.warning(BOKEH_ERROR_MESSAGE)
        return UNKNOWN
    return V1EventChart(kind=V1EventChartKind.BOKEH, figure=json_item(figure))
def plotly_chart(figure) -> V1EventChart:
    """Serialize a plotly figure (or a matplotlib figure, converted) into a chart event."""
    try:
        import plotly.tools
    except ImportError:
        logger.warning(PLOTLY_ERROR_MESSAGE)
        return UNKNOWN
    # Matplotlib figures are first converted to plotly.
    if module_type(figure, "matplotlib.figure.Figure"):
        figure = plotly.tools.mpl_to_plotly(figure)
    else:
        figure = plotly.tools.return_figure_from_figure_or_data(
            figure, validate_figure=True
        )
    return V1EventChart(kind=V1EventChartKind.PLOTLY, figure=figure)
def mpl_plotly_chart(figure) -> V1EventChart:
    """Convert a matplotlib figure (or the pyplot module) to a plotly chart event.

    Returns UNKNOWN when plotly or matplotlib is unavailable.
    """
    try:
        import plotly.tools
    except ImportError:
        logger.warning(PLOTLY_ERROR_MESSAGE)
        return UNKNOWN
    try:
        import matplotlib
        from matplotlib.figure import Figure
    except ImportError:
        logger.warning(MATPLOTLIB_ERROR_MESSAGE)
        # Fix: without this return, execution fell through and raised a
        # NameError on `matplotlib` whenever matplotlib is missing.
        return UNKNOWN
    if module_type(figure, "matplotlib.figure.Figure"):
        pass
    else:
        # Passing the pyplot module itself means "use the current figure".
        if figure == matplotlib.pyplot:
            figure = figure.gcf()
        elif not isinstance(figure, Figure):
            if hasattr(figure, "figure"):
                figure = figure.figure
                # Some matplotlib objects have a figure function
                if not isinstance(figure, Figure):
                    raise ValueError(
                        "Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted."
                    )
        figure = plotly.tools.mpl_to_plotly(figure)
    return plotly_chart(figure=figure)
def metrics_dict_to_list(metrics: Dict) -> List:
    """Turn a {name: value} mapping into a list of logged metric event specs."""
    return [
        LoggedEventSpec(
            name=name, kind=V1ArtifactKind.METRIC, event=V1Event.make(metric=value),
        )
        for name, value in metrics.items()
    ]
|
gregmbi/polyaxon | core/polyaxon/utils/date_utils.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytz
from datetime import date, datetime, timedelta
from dateutil import parser as dt_parser
from polyaxon.exceptions import PolyaxonDateTimeFormatterException
# Unix epoch as a tz-aware datetime; reference point for timestamp conversions.
epoch = datetime(1970, 1, 1, tzinfo=pytz.utc)
def to_timestamp(value):
    """
    Convert a time zone aware datetime (or a parseable date string) to a
    POSIX timestamp (with fractional component.)
    """
    parsed = dt_parser.parse(value) if isinstance(value, str) else value
    return (parsed - epoch).total_seconds()
def to_datetime(value):
    """
    Convert a POSIX timestamp to a time zone aware datetime.

    The timestamp value must be numeric (int or float, since it may carry a
    fractional component.)
    """
    return timedelta(seconds=value) + epoch
def file_modified_since(filepath: str, last_time: datetime) -> bool:
    # Compare the file's mtime (converted to an aware datetime) against
    # `last_time`; `last_time` must therefore be tz-aware.
    return to_datetime(os.stat(filepath).st_mtime) > last_time
class DateTimeFormatter:
    """
    The `DateTimeFormatter` class implements a utility used to create
    timestamps from strings and vice-versa.
    """
    DATE_FORMAT = "%Y-%m-%d"
    DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
    DATETIME_HOUR_FORMAT = "%Y-%m-%d %H:%M"
    @classmethod
    def format_date(cls, timestamp):
        """
        Creates a string representing the date information provided by the
        given `timestamp` object.
        """
        if not timestamp:
            raise PolyaxonDateTimeFormatterException(
                "timestamp must a valid string {}".format(timestamp)
            )
        return timestamp.strftime(cls.DATE_FORMAT)
    @classmethod
    def format_datetime(cls, timestamp):
        """
        Creates a string representing the date and time information provided by
        the given `timestamp` object.
        """
        if not timestamp:
            raise PolyaxonDateTimeFormatterException(
                "timestamp must a valid string {}".format(timestamp)
            )
        return timestamp.strftime(cls.DATETIME_FORMAT)
    @classmethod
    def extract_date(cls, date_str, timezone):
        """
        Tries to extract a `datetime` object from the given string, expecting
        date information only.
        Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
        """
        if not date_str:
            raise PolyaxonDateTimeFormatterException(
                "date_str must a valid string {}.".format(date_str)
            )
        if not timezone:
            raise PolyaxonDateTimeFormatterException(
                "timezone is required, received {}".format(timezone)
            )
        try:
            return cls._extract_timestamp(date_str, cls.DATE_FORMAT, timezone=timezone)
        except (TypeError, ValueError):
            raise PolyaxonDateTimeFormatterException(
                "Invalid date string {}.".format(date_str)
            )
    @classmethod
    def extract_datetime(cls, datetime_str, timezone):
        """
        Tries to extract a `datetime` object from the given string, including
        time information.
        Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
        """
        if not datetime_str:
            raise PolyaxonDateTimeFormatterException("datetime_str must a valid string")
        if not timezone:
            raise PolyaxonDateTimeFormatterException(
                "timezone is required, received {}".format(timezone)
            )
        try:
            return cls._extract_timestamp(
                datetime_str, cls.DATETIME_FORMAT, timezone=timezone
            )
        except (TypeError, ValueError):
            raise PolyaxonDateTimeFormatterException(
                "Invalid datetime string {}.".format(datetime_str)
            )
    @classmethod
    def extract_datetime_hour(cls, datetime_str, timezone):
        """
        Tries to extract a `datetime` object from the given string, including only hours.
        Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
        """
        if not datetime_str:
            raise PolyaxonDateTimeFormatterException("datetime_str must a valid string")
        if not timezone:
            raise PolyaxonDateTimeFormatterException(
                "timezone is required, received {}".format(timezone)
            )
        try:
            return cls._extract_timestamp(
                datetime_str, cls.DATETIME_HOUR_FORMAT, timezone=timezone
            )
        except (TypeError, ValueError):
            raise PolyaxonDateTimeFormatterException(
                "Invalid datetime string {}.".format(datetime_str)
            )
    @classmethod
    def extract(cls, timestamp_str, timezone):
        """
        Tries to extract a `datetime` object from the given string. First the
        datetime format is tried, if it fails, the date format is used for
        extraction.
        Raises `PolyaxonDateTimeFormatterException` if the extraction fails.
        """
        if not timestamp_str:
            raise PolyaxonDateTimeFormatterException(
                "timestamp_str must a valid string, received {}".format(timestamp_str)
            )
        if not timezone:
            raise PolyaxonDateTimeFormatterException(
                "timezone is required, received {}".format(timezone)
            )
        # Already a date/datetime: nothing to parse.
        if isinstance(timestamp_str, (date, datetime)):
            return timestamp_str
        try:
            return cls.extract_datetime(timestamp_str, timezone=timezone)
        except PolyaxonDateTimeFormatterException:
            pass
        try:
            return cls.extract_datetime_hour(timestamp_str, timezone=timezone)
        except PolyaxonDateTimeFormatterException:
            pass
        # We leave it to raise
        return cls.extract_date(timestamp_str, timezone=timezone)
    @staticmethod
    def _extract_timestamp(timestamp_str, dt_format, timezone):
        # NOTE(review): replace(tzinfo=pytz.timezone(...)) attaches the zone's
        # default (often LMT) offset; pytz recommends localize() instead.
        # Confirm whether callers rely on the current behavior before changing.
        timestamp = datetime.strptime(timestamp_str, dt_format)
        timestamp = timestamp.replace(tzinfo=pytz.timezone(timezone))
        return timestamp
|
gregmbi/polyaxon | core/polyaxon/proxies/schemas/base.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon import settings
def _get_indent(indent):
    """Build the leading whitespace for `indent` nginx-config nesting levels."""
    char = settings.PROXIES_CONFIG.nginx_indent_char
    width = settings.PROXIES_CONFIG.nginx_indent_width
    return char * width * indent
def get_config(options, indent=0, **kwargs):
    """Format an nginx options template with `kwargs` and indent every resulting line."""
    prefix = _get_indent(indent)
    lines = options.format(**kwargs).split("\n")
    return "\n".join("{}{}".format(prefix, line) for line in lines)
|
gregmbi/polyaxon | core/polyaxon/polyflow/early_stopping/policies.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.polyflow.optimization import Optimization
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig, BaseOneOfSchema
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
class MedianStoppingPolicySchema(BaseCamelSchema):
    """Marshmallow schema for the median early-stopping policy."""
    kind = fields.Str(allow_none=True, validate=validate.Equal("median"))
    evaluation_interval = RefOrObject(fields.Int(), required=True)
    min_interval = RefOrObject(fields.Int(allow_none=True))
    min_samples = RefOrObject(fields.Int(allow_none=True))
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1MedianStoppingPolicy
class V1MedianStoppingPolicy(BaseConfig, polyaxon_sdk.V1MedianStoppingPolicy):
    """Median stopping policy config; fields defined by MedianStoppingPolicySchema."""
    IDENTIFIER = "median"
    SCHEMA = MedianStoppingPolicySchema
    # camelCase attributes omitted from serialization when unset.
    REDUCED_ATTRIBUTES = ["minInterval", "minSamples"]
class TruncationStoppingPolicySchema(BaseCamelSchema):
    """Marshmallow schema for the truncation early-stopping policy."""
    kind = fields.Str(allow_none=True, validate=validate.Equal("truncation"))
    percent = RefOrObject(fields.Float(), required=True)
    evaluation_interval = RefOrObject(fields.Int(), required=True)
    min_interval = RefOrObject(fields.Int(allow_none=True))
    min_samples = RefOrObject(fields.Int(allow_none=True))
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1TruncationStoppingPolicy
class V1TruncationStoppingPolicy(BaseConfig, polyaxon_sdk.V1TruncationStoppingPolicy):
    """Truncation stopping policy config; fields defined by TruncationStoppingPolicySchema."""
    IDENTIFIER = "truncation"
    SCHEMA = TruncationStoppingPolicySchema
    # camelCase attributes omitted from serialization when unset.
    REDUCED_ATTRIBUTES = ["minInterval", "minSamples"]
class DiffStoppingPolicySchema(BaseCamelSchema):
    """Marshmallow schema for the diff early-stopping policy."""
    kind = fields.Str(allow_none=True, validate=validate.Equal("diff"))
    percent = RefOrObject(fields.Float(), required=True)
    evaluation_interval = RefOrObject(fields.Int(), required=True)
    min_interval = RefOrObject(fields.Int(allow_none=True))
    min_samples = RefOrObject(fields.Int(allow_none=True))
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1DiffStoppingPolicy
class V1DiffStoppingPolicy(BaseConfig, polyaxon_sdk.V1DiffStoppingPolicy):
    """Diff stopping policy config; fields defined by DiffStoppingPolicySchema."""
    IDENTIFIER = "diff"
    SCHEMA = DiffStoppingPolicySchema
    # camelCase attributes omitted from serialization when unset.
    REDUCED_ATTRIBUTES = ["minInterval", "minSamples"]
class StoppingPolicySchema(BaseOneOfSchema):
    """Polymorphic schema dispatching on `kind` to a concrete stopping-policy schema."""
    TYPE_FIELD = "kind"
    TYPE_FIELD_REMOVE = False
    SCHEMAS = {
        V1MedianStoppingPolicy.IDENTIFIER: MedianStoppingPolicySchema,
        V1TruncationStoppingPolicy.IDENTIFIER: TruncationStoppingPolicySchema,
        V1DiffStoppingPolicy.IDENTIFIER: DiffStoppingPolicySchema,
    }
class MetricEarlyStoppingSchema(BaseCamelSchema):
    """Marshmallow schema for metric-based early stopping (stop when a metric crosses `value`)."""
    kind = fields.Str(allow_none=True, validate=validate.Equal("metric_early_stopping"))
    metric = RefOrObject(fields.Str(), required=True)
    value = RefOrObject(fields.Float(), required=True)
    # Whether crossing `value` from above or below counts, per Optimization.
    optimization = RefOrObject(
        fields.Str(validate=validate.OneOf(Optimization.VALUES)), required=True
    )
    policy = fields.Nested(StoppingPolicySchema, allow_none=True)
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1MetricEarlyStopping
class V1MetricEarlyStopping(BaseConfig, polyaxon_sdk.V1MetricEarlyStopping):
    """Metric early-stopping config; fields defined by MetricEarlyStoppingSchema."""
    SCHEMA = MetricEarlyStoppingSchema
    IDENTIFIER = "metric_early_stopping"
class FailureEarlyStoppingSchema(BaseCamelSchema):
    """Marshmallow schema for failure-rate-based early stopping."""
    kind = fields.Str(
        allow_none=True, validate=validate.Equal("failure_early_stopping")
    )
    percent = RefOrObject(fields.Float(), required=True)
    evaluation_interval = RefOrObject(fields.Int(), required=True)
    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1FailureEarlyStopping
class V1FailureEarlyStopping(BaseConfig, polyaxon_sdk.V1FailureEarlyStopping):
    """Failure early-stopping config; fields defined by FailureEarlyStoppingSchema."""
    IDENTIFIER = "failure_early_stopping"
    SCHEMA = FailureEarlyStoppingSchema
|
gregmbi/polyaxon | core/polyaxon/utils/memoize_decorators.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def memoize(func):
    """
    Provides memoization for methods on a specific instance.
    Results are cached for the given argument list.

    See also: http://en.wikipedia.org/wiki/Memoization

    N.B. The cache object gets added to the instance instead of the global scope.
    Therefore cached results are restricted to that instance.
    The cache dictionary gets a name containing the name of the decorated function to
    avoid clashes.

    Example:
        class MyClass(object):
            @memoize
            def foo(self, a, b):
                return self._do_calculation(a, b)

    All arguments must be hashable. Keyword arguments are now supported
    (generalized from the previous positional-only implementation); calls
    without keyword arguments keep the exact same cache keys as before.
    """
    from functools import wraps

    cache_name = "__CACHED_{}".format(func.__name__)

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        cache = getattr(self, cache_name, None)
        if cache is None:
            cache = {}
            setattr(self, cache_name, cache)
        # Sort kwargs so the key is independent of keyword order; a plain
        # positional call keeps `args` as the key (backward compatible).
        key = (args, tuple(sorted(kwargs.items()))) if kwargs else args
        if key not in cache:
            cache[key] = func(self, *args, **kwargs)
        return cache[key]

    return wrapper
|
gregmbi/polyaxon | examples/in_cluster/tensorflow/mnist/model.py | import argparse
import gzip
import numpy as np
import os
import tensorflow as tf
from six.moves.urllib.request import urlretrieve
# Polyaxon
from polyaxon_client.tracking import Experiment
# Maps CLI-friendly names to the TF activation / optimizer callables.
ACTIVATIONS = {
    'relu': tf.nn.relu,
    'sigmoid': tf.sigmoid,
    'tanh': tf.tanh,
}
OPTIMIZERS = {
    'sgd': tf.train.GradientDescentOptimizer,
    'rmsprop': tf.train.RMSPropOptimizer,
    'adam': tf.train.AdamOptimizer,
}
# MNIST download location and the four idx-format archives.
MNIST_HOST = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
IMAGE_WIDTH = 28  # MNIST images are 28x28 pixels
OUTPUT_CLASSES = 10  # digits 0-9
def load_onehot_data(filename):
    """Read a gzipped MNIST idx1 label file and return one-hot float32 labels."""
    with gzip.open(filename, 'rb') as unzipped_file:
        raw = np.frombuffer(unzipped_file.read(), dtype=np.uint8)
    labels = raw[8:]  # skip the 8-byte idx1 header
    count = len(labels)
    onehot = np.zeros((count, OUTPUT_CLASSES), dtype=np.float32)
    onehot[np.arange(count), labels] = 1
    return onehot
def load_image_data(filename):
    """Read a gzipped MNIST idx3 image file; return flattened float32 rows in [0, 1]."""
    with gzip.open(filename, 'rb') as unzipped_file:
        raw = np.frombuffer(unzipped_file.read(), dtype=np.uint8)
    # Skip the 16-byte idx3 header, flatten each 28x28 image, scale to [0, 1].
    return raw[16:].reshape((-1, IMAGE_WIDTH ** 2)).astype(np.float32) / 255
def load_mnist_data(path='/tmp/mnist'):
    """Download MNIST (if not cached under `path`) and return ((x_train, y_train), (x_test, y_test))."""
    if not os.path.isdir(path):
        os.makedirs(path)
    for data_file in [
        TRAIN_IMAGES,
        TRAIN_LABELS,
        TEST_IMAGES,
        TEST_LABELS,
    ]:
        destination = os.path.join(path, data_file)
        # Only download archives that are not already cached locally.
        if not os.path.isfile(destination):
            urlretrieve("{}{}".format(MNIST_HOST, data_file), destination)
    return (
        (load_image_data(os.path.join(path, TRAIN_IMAGES)),
         load_onehot_data(os.path.join(path, TRAIN_LABELS))),
        (load_image_data(os.path.join(path, TEST_IMAGES)),
         load_onehot_data(os.path.join(path, TEST_LABELS))),
    )
def weight_variable(shape):
    # Small truncated-normal init (stddev 0.1) to break symmetry.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    # Slightly positive constant bias (0.1) to avoid dead ReLU units.
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv_layer(x, filter_size, out_features, activation, pool_size):
    """Conv2D (SAME padding, stride 1) + activation + max-pool block."""
    W = weight_variable([filter_size, filter_size, x.get_shape()[3].value, out_features])
    b = bias_variable([out_features])
    conv = ACTIVATIONS[activation](tf.nn.conv2d(x, W, [1, 1, 1, 1], padding='SAME') + b)
    pool = tf.nn.max_pool(conv, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')
    return pool
def fully_connected_layer(x, out_size):
    """Affine layer: x @ W + b (no activation applied here)."""
    W = weight_variable([x.get_shape()[1].value, out_size])
    b = bias_variable([out_size])
    return tf.matmul(x, W) + b
def create_model(conv1_size,
                 conv1_out,
                 conv1_activation,
                 pool1_size,
                 conv2_size,
                 conv2_out,
                 conv2_activation,
                 pool2_size,
                 fc1_activation,
                 fc1_size,
                 optimizer,
                 log_learning_rate):
    """Build the 2-conv + 1-FC MNIST classification graph.

    Returns (x, y, y_conv, keep_prob, train_step, accuracy): the input/label
    placeholders, the logits op, the dropout keep-probability placeholder,
    the optimizer step and the accuracy op.
    """
    x = tf.placeholder(tf.float32, shape=[None, IMAGE_WIDTH ** 2])
    y = tf.placeholder(tf.float32, shape=[None, OUTPUT_CLASSES])
    keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
    input_image = tf.reshape(x, [-1, IMAGE_WIDTH, IMAGE_WIDTH, 1])
    conv1 = conv_layer(input_image, conv1_size, conv1_out, conv1_activation, pool1_size)
    conv2 = conv_layer(conv1, conv2_size, conv2_out, conv2_activation, pool2_size)
    _, conv2_height, conv2_width, conv2_features = conv2.get_shape()
    flattened = tf.reshape(conv2,
                           [-1, conv2_height.value * conv2_width.value * conv2_features.value])
    fc_1 = ACTIVATIONS[fc1_activation](fully_connected_layer(flattened, fc1_size))
    fc_1_drop = tf.nn.dropout(fc_1, keep_prob)
    y_conv = fully_connected_layer(fc_1_drop, OUTPUT_CLASSES)
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_conv))
    # Learning rate is supplied as a base-10 exponent (e.g. -3 -> 1e-3).
    train_step = OPTIMIZERS[optimizer](10 ** log_learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    return x, y, y_conv, keep_prob, train_step, accuracy
def train_model(model, x_train, y_train, batch_size, dropout, epochs):
    """Run mini-batch training over the data for `epochs` shuffled passes."""
    x, y, y_conv, keep_prob, train_step, _ = model
    train_length = len(x_train)
    for i in range(epochs):
        # Reshuffle sample order every epoch.
        indices = np.arange(train_length)
        np.random.shuffle(indices)
        for start in range(0, train_length, batch_size):
            end = min(start + batch_size, train_length)
            batch_indices = indices[start:end]
            x_batch, y_batch = x_train[batch_indices], y_train[batch_indices]
            train_step.run(feed_dict={x: x_batch, y: y_batch, keep_prob: dropout})
def evaluate_model(model, x_test, y_test):
    """Return test-set accuracy with dropout disabled (keep_prob=1)."""
    x, y, y_conv, keep_prob, _, accuracy = model
    return accuracy.eval(feed_dict={x: x_test, y: y_test, keep_prob: 1.0})
if __name__ == '__main__':
    # Hyperparameters are exposed as CLI flags so Polyaxon can sweep them.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--conv1_size',
        type=int,
        default=5)
    parser.add_argument(
        '--conv1_out',
        type=int,
        default=32)
    parser.add_argument(
        '--conv1_activation',
        type=str,
        default='relu')
    parser.add_argument(
        '--pool1_size',
        type=int,
        default=2)
    parser.add_argument(
        '--conv2_size',
        type=int,
        default=5
    )
    parser.add_argument(
        '--conv2_out',
        type=int,
        default=64)
    parser.add_argument(
        '--conv2_activation',
        type=str,
        default='relu')
    parser.add_argument(
        '--pool2_size',
        type=int,
        default=2)
    parser.add_argument(
        '--dropout',
        type=float,
        default=0.2
    )
    parser.add_argument(
        '--fc1_size',
        type=int,
        default=1024
    )
    parser.add_argument(
        '--fc1_activation',
        type=str,
        default='sigmoid')
    parser.add_argument(
        '--optimizer',
        type=str,
        default='adam'
    )
    parser.add_argument(
        '--log_learning_rate',
        type=int,
        default=-3
    )
    parser.add_argument(
        '--batch_size',
        type=int,
        default=100
    )
    parser.add_argument(
        '--epochs',
        type=int,
        default=1
    )
    args = parser.parse_args()
    # Polyaxon: register this run as a tracked experiment.
    experiment = Experiment()
    (x_train, y_train), (x_test, y_test) = load_mnist_data()
    # Polyaxon: record references to the datasets used by this run.
    experiment.log_data_ref(data=x_train, data_name='x_train')
    experiment.log_data_ref(data=y_train, data_name='y_train')
    experiment.log_data_ref(data=x_test, data_name='x_test')
    experiment.log_data_ref(data=y_test, data_name='y_test')
    with tf.Session() as sess:
        # Build, initialize, train and evaluate the graph in one session.
        model = create_model(
            conv1_size=args.conv1_size,
            conv1_out=args.conv1_out,
            conv1_activation=args.conv1_activation,
            pool1_size=args.pool1_size,
            conv2_size=args.conv2_size,
            conv2_out=args.conv2_out,
            conv2_activation=args.conv2_activation,
            pool2_size=args.pool2_size,
            fc1_activation=args.fc1_activation,
            fc1_size=args.fc1_size,
            optimizer=args.optimizer,
            log_learning_rate=args.log_learning_rate)
        sess.run(tf.global_variables_initializer())
        train_model(model,
                    x_train,
                    y_train,
                    batch_size=args.batch_size,
                    dropout=args.dropout,
                    epochs=args.epochs)
        accuracy = evaluate_model(model, x_test, y_test)
        # Polyaxon: report the final metric for this run.
        experiment.log_metrics(accuracy=accuracy)
|
gregmbi/polyaxon | core/polyaxon/polyflow/run/job.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.containers.names import MAIN_JOB_CONTAINER
from polyaxon.k8s import k8s_schemas
from polyaxon.polyflow.environment import EnvironmentSchema
from polyaxon.polyflow.init import InitSchema
from polyaxon.polyflow.run.kinds import V1RunKind
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.swagger import SwaggerField
class JobSchema(BaseCamelSchema):
    """Marshmallow schema for the `job` run kind.

    Accepts camelCase payloads (via BaseCamelSchema) and validates a batch
    job's environment, connections, volumes, init containers, sidecars and
    main container.
    """

    # Must equal V1RunKind.JOB when provided.
    kind = fields.Str(allow_none=True, validate=validate.Equal(V1RunKind.JOB))
    environment = fields.Nested(EnvironmentSchema, allow_none=True)
    # Names of connections to resolve/mount for the job.
    connections = fields.List(fields.Str(), allow_none=True)
    volumes = fields.List(SwaggerField(cls=k8s_schemas.V1Volume), allow_none=True)
    init = fields.List(fields.Nested(InitSchema), allow_none=True)
    sidecars = fields.List(SwaggerField(cls=k8s_schemas.V1Container), allow_none=True)
    # Main container; its name defaults to the standard main-job container name.
    container = SwaggerField(
        cls=k8s_schemas.V1Container,
        defaults={"name": MAIN_JOB_CONTAINER},
        allow_none=True,
    )

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1Job
class V1Job(BaseConfig, polyaxon_sdk.V1Job):
    """Config object for the `job` run kind, validated by JobSchema."""

    SCHEMA = JobSchema
    IDENTIFIER = V1RunKind.JOB
    # Attributes dropped from the serialized form when unset.
    REDUCED_ATTRIBUTES = [
        "kind",
        "container",
        "environment",
        "init",
        "sidecars",
        "connections",
        "volumes",
    ]
|
gregmbi/polyaxon | core/polyaxon/polyflow/parallel/iterative.py | <filename>core/polyaxon/polyflow/parallel/iterative.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.containers.names import MAIN_JOB_CONTAINER
from polyaxon.k8s import k8s_schemas
from polyaxon.polyflow.early_stopping import EarlyStoppingSchema
from polyaxon.polyflow.parallel.matrix import MatrixSchema
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
from polyaxon.schemas.fields.swagger import SwaggerField
class IterativeSchema(BaseCamelSchema):
    """Marshmallow schema for the `iterative` parallelism strategy."""

    # Must equal the literal "iterative" when provided.
    kind = fields.Str(allow_none=True, validate=validate.Equal("iterative"))
    # Number of iterations to run; must be >= 1. May be a reference or a value.
    num_iterations = RefOrObject(
        fields.Int(required=True, validate=validate.Range(min=1)), required=True
    )
    concurrency = fields.Int(allow_none=True)
    # Hyperparameter search space: name -> matrix (choice, linspace, ...).
    params = fields.Dict(
        keys=fields.Str(), values=fields.Nested(MatrixSchema), allow_none=True
    )
    seed = RefOrObject(fields.Int(allow_none=True))
    # Container driving the iterations; name defaults to the main-job container.
    container = SwaggerField(
        cls=k8s_schemas.V1Container,
        defaults={"name": MAIN_JOB_CONTAINER},
        allow_none=True,
    )
    early_stopping = fields.Nested(EarlyStoppingSchema, many=True, allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1Iterative
class V1Iterative(BaseConfig, polyaxon_sdk.V1Iterative):
    """Config object for the `iterative` strategy, validated by IterativeSchema."""

    IDENTIFIER = "iterative"
    SCHEMA = IterativeSchema
    # Attributes dropped from the serialized form when unset (camelCase keys).
    REDUCED_ATTRIBUTES = ["params", "seed", "container", "earlyStopping", "concurrency"]
|
gregmbi/polyaxon | core/tests/test_polyaxonfile/test_polyaxonfile_pipelines.py | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from tests.utils import BaseTestCase
from polyaxon.config_reader import reader
from polyaxon.exceptions import PolyaxonSchemaError
from polyaxon.polyaxonfile import PolyaxonFile
from polyaxon.polyaxonfile.specs import (
CompiledOperationSpecification,
OperationSpecification,
)
from polyaxon.polyflow import V1CompiledOperation, V1RunKind
from polyaxon.polyflow.early_stopping import (
V1FailureEarlyStopping,
V1MetricEarlyStopping,
)
from polyaxon.polyflow.parallel import V1GridSearch, V1Hyperband, V1RandomSearch
from polyaxon.polyflow.parallel.matrix import V1HpChoice, V1HpLinSpace
from polyaxon.polyflow.run import V1Dag
@pytest.mark.polyaxonfile_mark
class TestPolyaxonfileWithPipelines(BaseTestCase):
    """Parsing/validation tests for polyaxonfiles that define pipelines (DAGs).

    Each test reads a YAML fixture, compiles it to a V1CompiledOperation and
    checks either that context application raises, or that the resulting
    operations, dependencies, schedules and matrix params match the fixture.
    """

    def test_pipeline_with_no_ops_raises(self):
        """A pipeline without operations is rejected at context application."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath("tests/fixtures/pipelines/pipeline_with_no_ops.yml")
                ),
                {"kind": "compiled_operation"},
            ]
        )
        with self.assertRaises(PolyaxonSchemaError):
            CompiledOperationSpecification.apply_context(run_config)

    def test_pipeline_with_no_components_raises(self):
        """A pipeline without components is rejected at context application."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath(
                        "tests/fixtures/pipelines/pipeline_with_no_components.yml"
                    )
                ),
                {"kind": "compiled_operation"},
            ]
        )
        with self.assertRaises(PolyaxonSchemaError):
            CompiledOperationSpecification.apply_context(run_config)

    def test_pipeline_ops_not_corresponding_to_components(self):
        """Ops referencing unknown components are rejected."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath(
                        "tests/fixtures/pipelines/pipeline_ops_not_corresponding_to_components.yml"
                    )
                ),
                {"kind": "compiled_operation"},
            ]
        )
        with self.assertRaises(PolyaxonSchemaError):
            CompiledOperationSpecification.apply_context(run_config)

    def test_cyclic_pipeline_raises(self):
        """A cyclic DAG parses but fails context application."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath("tests/fixtures/pipelines/cyclic_pipeline.yml")
                ),
                {"kind": "compiled_operation"},
            ]
        )
        assert run_config.is_dag_run is True
        assert run_config.has_pipeline is True
        with self.assertRaises(PolyaxonSchemaError):
            CompiledOperationSpecification.apply_context(run_config)

    def test_cron_pipeline(self):
        """A cron-scheduled pipeline exposes its cron schedule settings."""
        plx_file = PolyaxonFile(
            os.path.abspath("tests/fixtures/pipelines/simple_cron_pipeline.yml")
        )
        # Get compiled_operation data
        run_config = OperationSpecification.compile_operation(plx_file.config)
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert run_config.run is not None
        assert len(run_config.run.operations) == 1
        assert run_config.run.operations[0].name == "cron-task"
        assert run_config.schedule is not None
        assert run_config.schedule.kind == "cron"
        assert run_config.schedule.cron == "0 0 * * *"

    def test_interval_pipeline(self):
        """An interval-scheduled pipeline exposes its interval settings."""
        plx_file = PolyaxonFile(
            os.path.abspath("tests/fixtures/pipelines/simple_recurrent_pipeline.yml")
        )
        # Get compiled_operation data
        run_config = OperationSpecification.compile_operation(plx_file.config)
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert run_config.run is not None
        assert len(run_config.run.operations) == 1
        assert run_config.run.operations[0].name == "recurrent-task"
        assert run_config.schedule is not None
        assert run_config.schedule.kind == "interval"
        assert run_config.schedule.start_at.year == 2019
        assert run_config.schedule.frequency.seconds == 120
        assert run_config.schedule.depends_on_past is True
        assert run_config.schedule is not None

    def test_sequential_pipeline(self):
        """A linear dependency chain sorts topologically one op per layer."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath(
                        "tests/fixtures/pipelines/simple_sequential_pipeline.yml"
                    )
                ),
                {"kind": "compiled_operation"},
            ]
        )
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert run_config.run is not None
        assert len(run_config.run.operations) == 4
        assert run_config.run.operations[0].name == "job1"
        assert run_config.run.operations[1].name == "job2"
        assert run_config.run.operations[1].dependencies == ["job1"]
        assert run_config.run.operations[2].name == "experiment1"
        assert run_config.run.operations[2].dependencies == ["job2"]
        assert run_config.run.operations[3].name == "experiment2"
        assert run_config.run.operations[3].dependencies == ["experiment1"]
        dag_strategy = run_config.run
        assert dag_strategy.sort_topologically(dag_strategy.dag) == [
            ["job1"],
            ["job2"],
            ["experiment1"],
            ["experiment2"],
        ]
        assert run_config.schedule is None

    def test_parallel_pipeline(self):
        """Fully independent ops all land in the first topological layer."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath(
                        "tests/fixtures/pipelines/simple_parallel_pipeline.yml"
                    )
                ),
                {"kind": "compiled_operation"},
            ]
        )
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert len(run_config.run.operations) == 4
        assert run_config.run.operations[0].name == "job1"
        assert run_config.run.operations[0].dependencies is None
        assert run_config.run.operations[1].name == "job2"
        assert run_config.run.operations[1].dependencies is None
        assert run_config.run.operations[2].name == "experiment1"
        assert run_config.run.operations[2].dependencies is None
        assert run_config.run.operations[3].name == "experiment2"
        assert run_config.run.operations[3].dependencies is None
        dag_strategy = run_config.run
        assert set(dag_strategy.sort_topologically(dag_strategy.dag)[0]) == {
            "job1",
            "job2",
            "experiment1",
            "experiment2",
        }
        assert run_config.run.concurrency == 2
        assert run_config.schedule is None

    def test_dag_pipeline(self):
        """A diamond-shaped DAG produces the expected three layers."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath("tests/fixtures/pipelines/simple_dag_pipeline.yml")
                ),
                {"kind": "compiled_operation"},
            ]
        )
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert len(run_config.run.operations) == 5
        assert run_config.run.operations[0].name == "job1"
        assert run_config.run.operations[1].name == "experiment1"
        assert run_config.run.operations[1].dependencies == ["job1"]
        assert run_config.run.operations[2].name == "experiment2"
        assert run_config.run.operations[2].dependencies == ["job1"]
        assert run_config.run.operations[3].name == "experiment3"
        assert run_config.run.operations[3].dependencies == ["job1"]
        assert run_config.run.operations[4].name == "job2"
        assert run_config.run.operations[4].dependencies == [
            "experiment1",
            "experiment2",
            "experiment3",
        ]
        dag_strategy = run_config.run
        sorted_dag = dag_strategy.sort_topologically(dag_strategy.dag)
        assert sorted_dag[0] == ["job1"]
        assert set(sorted_dag[1]) == {"experiment1", "experiment2", "experiment3"}
        assert sorted_dag[2] == ["job2"]
        assert run_config.run.concurrency == 3
        assert run_config.schedule is None

    def test_build_run_pipeline(self):
        """Build-then-run pipeline: op params resolve across operations."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath("tests/fixtures/pipelines/build_run_pipeline.yml")
                ),
                {"kind": "compiled_operation"},
            ]
        )
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert len(run_config.run.operations) == 2
        assert run_config.run.operations[0].name == "build"
        assert run_config.run.operations[1].name == "run"
        assert run_config.is_dag_run is True
        assert run_config.has_pipeline is True
        assert run_config.schedule is None
        assert len(run_config.run.components) == 2
        assert run_config.run.components[0].name == "experiment-template"
        assert run_config.run.components[0].termination.to_dict() == {"maxRetries": 2}
        assert run_config.run.components[0].run.to_dict() == {
            "kind": V1RunKind.JOB,
            "environment": {
                "nodeSelector": {"polyaxon": "experiments"},
                "serviceAccountName": "service",
                "imagePullSecrets": ["secret1", "secret2"],
            },
            "container": {
                "image": "{{ image }}",
                "command": ["python3", "main.py"],
                "args": "--lr={{ lr }}",
                "name": "polyaxon-main",
                "resources": {"requests": {"cpu": 1}},
            },
        }
        assert run_config.run.components[1].name == "build-template"
        assert run_config.run.components[1].run.container.image == "base"
        assert run_config.run.operations[0].name == "build"
        # Create an op spec
        run_config.run.set_op_component("run")
        assert run_config.run.operations[1].has_component_reference is True
        job_config = run_config.run.get_op_spec_by_index(1)
        assert {p: job_config.params[p].to_dict() for p in job_config.params} == {
            "image": {"value": "outputs.docker-image", "ref": "ops.build"},
            "lr": {"value": 0.001},
        }
        run_config = OperationSpecification.compile_operation(job_config)
        run_config.apply_params({"image": {"value": "foo"}, "lr": {"value": 0.001}})
        run_config = CompiledOperationSpecification.apply_context(run_config)
        run_config = CompiledOperationSpecification.apply_run_contexts(run_config)
        assert run_config.termination.to_dict() == {"maxRetries": 2}
        assert run_config.run.to_dict() == {
            "kind": V1RunKind.JOB,
            "environment": {
                "nodeSelector": {"polyaxon": "experiments"},
                "serviceAccountName": "service",
                "imagePullSecrets": ["secret1", "secret2"],
            },
            "container": {
                "image": "foo",
                "command": ["python3", "main.py"],
                "args": "--lr=0.001",
                "name": "polyaxon-main",
                "resources": {"requests": {"cpu": 1}},
            },
        }

    def test_matrix_early_stopping_file_passes(self):
        """Random-search matrix with early stopping parses into typed objects."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath(
                        "tests/fixtures/pipelines/matrix_file_early_stopping.yml"
                    )
                ),
                {"kind": "compiled_operation"},
            ]
        )
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert run_config.run is not None
        assert run_config.is_dag_run is True
        assert run_config.has_pipeline is True
        assert run_config.schedule is None
        assert run_config.run.concurrency == 4
        assert isinstance(run_config.run, V1Dag)
        assert run_config.run.early_stopping[0].kind == "failure_early_stopping"
        assert isinstance(run_config.run.early_stopping[0], V1FailureEarlyStopping)
        assert len(run_config.run.early_stopping) == 1
        assert run_config.run.kind == V1Dag.IDENTIFIER
        assert len(run_config.run.operations) == 2
        assert len(run_config.run.components) == 1
        template_random = run_config.run.operations[1].parallel
        assert isinstance(template_random, V1RandomSearch)
        assert isinstance(template_random.params["lr"], V1HpLinSpace)
        assert isinstance(template_random.params["loss"], V1HpChoice)
        assert template_random.params["lr"].to_dict() == {
            "kind": "linspace",
            "value": {"start": 0.01, "stop": 0.1, "num": 5},
        }
        assert template_random.params["loss"].to_dict() == {
            "kind": "choice",
            "value": ["MeanSquaredError", "AbsoluteDifference"],
        }
        assert template_random.concurrency == 2
        assert template_random.num_runs == 300
        assert template_random.early_stopping[0].kind == "metric_early_stopping"
        assert len(template_random.early_stopping) == 1
        assert isinstance(template_random.early_stopping[0], V1MetricEarlyStopping)

    def test_matrix_file_passess(self):
        """Hyperband matrix file parses every hyperparameter kind."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath("tests/fixtures/pipelines/matrix_file.yml")
                ),
                {"kind": "compiled_operation"},
            ]
        )
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert run_config.version == 1.05
        assert run_config.is_dag_run is True
        assert run_config.has_pipeline is True
        assert run_config.schedule is None
        assert run_config.run.concurrency == 4
        assert isinstance(run_config.run, V1Dag)
        assert run_config.run.early_stopping is None
        assert run_config.run.kind == V1Dag.IDENTIFIER
        assert len(run_config.run.operations) == 2
        assert len(run_config.run.components) == 1
        template_hyperband = run_config.run.operations[1].parallel
        assert isinstance(template_hyperband.params["lr"], V1HpLinSpace)
        assert isinstance(template_hyperband.params["loss"], V1HpChoice)
        assert template_hyperband.params["lr"].to_dict() == {
            "kind": "linspace",
            "value": {"start": 0.01, "stop": 0.1, "num": 5},
        }
        assert template_hyperband.params["loss"].to_dict() == {
            "kind": "choice",
            "value": ["MeanSquaredError", "AbsoluteDifference"],
        }
        assert template_hyperband.params["normal_rate"].to_dict() == {
            "kind": "normal",
            "value": {"loc": 0, "scale": 0.9},
        }
        assert template_hyperband.params["dropout"].to_dict() == {
            "kind": "qloguniform",
            "value": {"high": 0.8, "low": 0, "q": 0.1},
        }
        assert template_hyperband.params["activation"].to_dict() == {
            "kind": "pchoice",
            "value": [["relu", 0.1], ["sigmoid", 0.8]],
        }
        assert template_hyperband.params["model"].to_dict() == {
            "kind": "choice",
            "value": ["CDNA", "DNA", "STP"],
        }
        assert template_hyperband.concurrency == 2
        assert isinstance(template_hyperband, V1Hyperband)

    def test_matrix_file_passes_int_float_types(self):
        """Grid-search matrix file preserves int/float param value types."""
        run_config = V1CompiledOperation.read(
            [
                reader.read(
                    os.path.abspath(
                        "tests/fixtures/pipelines/matrix_file_with_int_float_types.yml"
                    )
                ),
                {"kind": "compiled_operation"},
            ]
        )
        run_config = CompiledOperationSpecification.apply_context(run_config)
        assert run_config.version == 1.05
        assert run_config.is_dag_run is True
        assert run_config.has_pipeline is True
        assert run_config.schedule is None
        assert run_config.run.concurrency == 4
        assert isinstance(run_config.run, V1Dag)
        assert run_config.run.early_stopping is None
        assert run_config.run.kind == V1Dag.IDENTIFIER
        assert len(run_config.run.operations) == 2
        assert len(run_config.run.components) == 1
        template_grid = run_config.run.operations[1].parallel
        assert isinstance(template_grid, V1GridSearch)
        assert isinstance(template_grid.params["param1"], V1HpChoice)
        assert isinstance(template_grid.params["param2"], V1HpChoice)
        assert template_grid.params["param1"].to_dict() == {
            "kind": "choice",
            "value": [1, 2],
        }
        assert template_grid.params["param2"].to_dict() == {
            "kind": "choice",
            "value": [3.3, 4.4],
        }
        assert template_grid.concurrency == 2
        assert template_grid.early_stopping is None
|
gregmbi/polyaxon | core/polyaxon/client/project.py | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from polyaxon import settings
from polyaxon.client import PolyaxonClient
from polyaxon.client.decorators import check_no_op, check_offline
from polyaxon.env_vars.getters import get_project_full_name, get_project_info
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.utils.query_params import get_query_params
class ProjectClient:
    """Client for a single project's API endpoints.

    Wraps PolyaxonClient's `projects_v1` API. All calls are no-ops when
    the client is configured as no_op and are skipped when offline (see
    the `check_no_op`/`check_offline` decorators).
    """

    @check_no_op
    def __init__(
        self, owner=None, project=None, client=None,
    ):
        """Resolve owner/project and set up the underlying API client.

        Args:
            owner: project owner; when missing it is derived from `project`
                if the latter carries a fully qualified `owner/project` name.
            project: project name or full `owner/project` name.
            client: optional preconfigured PolyaxonClient; a default one is
                created unless running offline.

        Raises:
            PolyaxonClientException: when no owner can be resolved.
        """
        if not owner and project:
            # Try to split a fully qualified `owner/project` name.
            owner, project = get_project_info(
                get_project_full_name(owner=owner, project=project)
            )
        if not owner:
            raise PolyaxonClientException("Please provide a valid project owner.")
        self.client = client
        if not (self.client or settings.CLIENT_CONFIG.is_offline):
            self.client = PolyaxonClient()
        self._owner = owner
        self._project = project
        self._project_data = polyaxon_sdk.V1Project()

    @property
    def owner(self):
        return self._owner

    @property
    def project(self):
        return self._project

    @property
    def project_data(self):
        # Cached V1Project data; refreshed by refresh_data()/create().
        return self._project_data

    @check_no_op
    @check_offline
    def refresh_data(self):
        """Reload the project data from the API."""
        self._project_data = self.client.projects_v1.get_project(
            self.owner, self.project
        )

    @check_no_op
    @check_offline
    # Fix: the annotation was `polyaxon_sdk.V1Project()` which instantiated
    # the class at import time; annotations should be the class itself.
    def create(self, data: polyaxon_sdk.V1Project):
        """Create the project and remember its name and data.

        Returns the created V1Project.
        """
        self._project_data = self.client.projects_v1.create_project(self.owner, data)
        self._project = self._project_data.name
        return self._project_data

    @check_no_op
    @check_offline
    def list(
        self, query: str = None, sort: str = None, limit: int = None, offset: int = None
    ):
        """List the owner's projects with optional filtering/sorting/paging."""
        params = get_query_params(limit=limit, offset=offset, query=query, sort=sort)
        return self.client.projects_v1.list_projects(self.owner, **params)

    @check_no_op
    @check_offline
    def delete(self):
        """Delete this project."""
        return self.client.projects_v1.delete_project(self.owner, self.project)

    @check_no_op
    @check_offline
    def update(self, data: polyaxon_sdk.V1Project):
        """Patch this project with the given data (see `create` note on the annotation)."""
        return self.client.projects_v1.patch_project(
            self.owner, self.project, body=data
        )
|
gregmbi/polyaxon | core/polyaxon/streams/controllers/archived_logs.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from typing import List, Optional, Tuple
import aiofiles
from polyaxon.polyboard.logging import V1Log, V1Logs
from polyaxon.stores.manager import list_files
from polyaxon.streams.tasks.logs import download_logs_file
@functools.lru_cache(maxsize=30)
def get_logs_files(run_uuid: str) -> List[str]:
    """Return the sorted names of archived log files under `{run_uuid}/logs`.

    Results are memoized per run_uuid via lru_cache to avoid repeated store
    listings. NOTE: the cached list object is shared between callers, so it
    must not be mutated.
    """
    files = list_files(subpath="{}/logs".format(run_uuid))
    if not files["files"]:
        return []
    # Iterating the dict yields its keys directly; no intermediate list needed.
    return sorted(files["files"])
async def get_next_file(run_uuid: str, last_file: str = None) -> Optional[str]:
    """Return the first log file for the run, or the one right after `last_file`.

    Returns None when there are no files, when `last_file` is the final file,
    or when `last_file` is not among the known files.
    """
    available = get_logs_files(run_uuid)
    if not available:
        return None
    if not last_file:
        return available[0]
    try:
        position = available.index(last_file) + 1
    except ValueError:
        # Unknown cursor: treat the stream as exhausted.
        return None
    if position >= len(available):
        return None
    return available[position]
async def get_archived_operation_logs(
    run_uuid: str, last_file: Optional[str]
) -> Tuple[List[V1Log], Optional[str]]:
    """Fetch and parse the next archived logs file for a run.

    Returns (logs, last_file): the parsed V1Log entries of the file that
    follows `last_file` (empty when nothing is left or the download failed),
    and the name of the file that was read, usable as the next cursor.
    """
    logs = []
    last_file = await get_next_file(run_uuid=run_uuid, last_file=last_file)
    if not last_file:
        # No more files to read.
        return logs, last_file
    logs_path = await download_logs_file(run_uuid=run_uuid, last_file=last_file)
    if not os.path.exists(logs_path):
        # Download failed or the file vanished; return an empty page.
        return logs, last_file
    async with aiofiles.open(logs_path, mode="r") as f:
        contents = await f.read()
        if contents:
            logs = V1Logs.read(contents)
            logs = logs.logs
    return logs, last_file
|
gregmbi/polyaxon | core/polyaxon/polypod/specs/contexts.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from polyaxon import settings
from polyaxon.polyflow import V1Plugins
class PluginsContextsSpec(
    namedtuple(
        "PluginsContextsSpec",
        "auth docker shm collect_logs collect_artifacts collect_resources sync_statuses",
    )
):
    """Resolved plugin flags for a run, derived from a V1Plugins config."""

    @classmethod
    def from_config(cls, config: V1Plugins) -> "PluginsContextsSpec":
        """Build the spec from defaults, config overrides, then no_api policy.

        Any flag explicitly set on `config` (i.e. not None) overrides the
        default; when the client runs with no_api, auth and collection of
        logs/artifacts are forced off.
        """
        resolved = {
            "auth": True,
            "docker": False,
            "shm": False,
            "collect_logs": True,
            "collect_artifacts": True,
            "collect_resources": True,
            "sync_statuses": True,
        }
        if config:
            for flag in resolved:
                override = getattr(config, flag)
                if override is not None:
                    resolved[flag] = override
        if settings.CLIENT_CONFIG.no_api:
            resolved["auth"] = False
            resolved["collect_logs"] = False
            resolved["collect_artifacts"] = False
        return cls(**resolved)
|
gregmbi/polyaxon | core/polyaxon/k8s/manager.py | <filename>core/polyaxon/k8s/manager.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from polyaxon.exceptions import PolyaxonK8SError
from polyaxon.k8s import constants
from polyaxon.logger import logger
class K8SManager:
def __init__(self, k8s_config=None, namespace="default", in_cluster=False):
    """Wrapper around the kubernetes client APIs for one namespace.

    When `k8s_config` is None, loads the ambient configuration (in-cluster
    service account or local kubeconfig) and leaves `api_client` as None so
    the kubernetes library uses its default client; otherwise builds an
    ApiClient from the given configuration.
    """
    if not k8s_config:
        if in_cluster:
            config.load_incluster_config()
        else:
            config.load_kube_config()
        self.api_client = None
    else:
        self.api_client = client.api_client.ApiClient(configuration=k8s_config)
    # API handles are created lazily by the corresponding properties.
    self._k8s_api = None
    self._k8s_batch_api = None
    self._k8s_apps_api = None
    self._k8s_beta_api = None
    self._networking_v1_beta1_api = None
    self._k8s_custom_object_api = None
    self._k8s_version_api = None
    self.namespace = namespace
    self.in_cluster = in_cluster
@property
def k8s_api(self):
    # Lazily-built CoreV1Api handle (pods, services, secrets, config maps).
    if not self._k8s_api:
        self._k8s_api = client.CoreV1Api(self.api_client)
    return self._k8s_api
@property
def k8s_batch_api(self):
    # Lazily-built BatchV1Api handle (jobs).
    if not self._k8s_batch_api:
        self._k8s_batch_api = client.BatchV1Api(self.api_client)
    return self._k8s_batch_api
@property
def k8s_apps_api(self):
    # Lazily-built AppsV1Api handle (deployments).
    if not self._k8s_apps_api:
        self._k8s_apps_api = client.AppsV1Api(self.api_client)
    return self._k8s_apps_api
@property
def k8s_beta_api(self):
    # Lazily-built ExtensionsV1beta1Api handle.
    if not self._k8s_beta_api:
        self._k8s_beta_api = client.ExtensionsV1beta1Api(self.api_client)
    return self._k8s_beta_api
@property
def networking_v1_beta1_api(self):
    # Lazily-built NetworkingV1beta1Api handle (ingresses).
    if not self._networking_v1_beta1_api:
        self._networking_v1_beta1_api = client.NetworkingV1beta1Api(self.api_client)
    return self._networking_v1_beta1_api
@property
def k8s_custom_object_api(self):
    # Lazily-built CustomObjectsApi handle (CRD instances).
    if not self._k8s_custom_object_api:
        self._k8s_custom_object_api = client.CustomObjectsApi(self.api_client)
    return self._k8s_custom_object_api
@property
def k8s_version_api(self):
    # Lazily-built VersionApi handle (cluster version info).
    if not self._k8s_version_api:
        self._k8s_version_api = client.VersionApi(self.api_client)
    return self._k8s_version_api
def set_namespace(self, namespace):
    """Switch the namespace used by all subsequent calls."""
    self.namespace = namespace
def get_version(self, reraise=False):
    """Return cluster version info as a dict; None on API error unless `reraise`."""
    try:
        return self.k8s_version_api.get_code().to_dict()
    except ApiException as e:
        logger.error("K8S error: {}".format(e))
        if reraise:
            raise PolyaxonK8SError("Connection error: %s" % e) from e
def _list_namespace_resource(self, labels, resource_api, reraise=False, **kwargs):
    """List namespaced resources matching a label selector.

    `resource_api` is the bound list function to call; extra kwargs are
    forwarded to it. Returns [] on API error unless `reraise` is set.
    """
    try:
        res = resource_api(
            namespace=self.namespace, label_selector=labels, **kwargs
        )
        return [p for p in res.items]
    except ApiException as e:
        logger.error("K8S error: {}".format(e))
        if reraise:
            raise PolyaxonK8SError("Connection error: %s" % e) from e
        return []
def list_nodes(self, reraise=False):
    """List all cluster nodes; [] on API error unless `reraise`."""
    try:
        res = self.k8s_api.list_node()
        return [p for p in res.items]
    except ApiException as e:
        logger.error("K8S error: {}".format(e))
        if reraise:
            raise PolyaxonK8SError("Connection error: %s" % e) from e
        return []
def list_pods(self, labels, include_uninitialized=True, reraise=False):
    """List pods in the namespace matching the label selector."""
    return self._list_namespace_resource(
        labels=labels,
        resource_api=self.k8s_api.list_namespaced_pod,
        reraise=reraise,
        include_uninitialized=include_uninitialized,
    )
def list_jobs(self, labels, include_uninitialized=True, reraise=False):
    """List batch jobs in the namespace matching the label selector."""
    return self._list_namespace_resource(
        labels=labels,
        resource_api=self.k8s_batch_api.list_namespaced_job,
        reraise=reraise,
        include_uninitialized=include_uninitialized,
    )
def list_custom_objects(self, labels, group, version, plural, reraise=False):
    """List custom objects (CRD instances) matching the label selector."""
    return self._list_namespace_resource(
        labels=labels,
        resource_api=self.k8s_custom_object_api.list_namespaced_custom_object,
        reraise=reraise,
        group=group,
        version=version,
        plural=plural,
    )
def list_services(self, labels, reraise=False):
    """List services in the namespace matching the label selector."""
    return self._list_namespace_resource(
        labels=labels,
        resource_api=self.k8s_api.list_namespaced_service,
        reraise=reraise,
    )
def list_deployments(self, labels, reraise=False):
    """List deployments in the namespace matching the label selector."""
    return self._list_namespace_resource(
        labels=labels,
        resource_api=self.k8s_apps_api.list_namespaced_deployment,
        reraise=reraise,
    )
def list_ingresses(self, labels, reraise=False):
    """List ingresses in the namespace matching the label selector."""
    return self._list_namespace_resource(
        labels=labels,
        resource_api=self.networking_v1_beta1_api.list_namespaced_ingress,
        reraise=reraise,
    )
def update_node_labels(self, node, labels, reraise=False):
    """Patch the labels of a node; None on API error unless `reraise`."""
    body = {"metadata": {"labels": labels}, "namespace": self.namespace}
    try:
        return self.k8s_api.patch_node(name=node, body=body)
    except ApiException as e:
        logger.error("K8S error: {}".format(e))
        if reraise:
            raise PolyaxonK8SError("Connection error: %s" % e) from e
def create_config_map(self, name, body):
    """Create a config map; `name` is only used for logging."""
    resp = self.k8s_api.create_namespaced_config_map(
        namespace=self.namespace, body=body
    )
    logger.debug("Config map `{}` was created".format(name))
    return resp
def update_config_map(self, name, body):
    """Patch an existing config map."""
    resp = self.k8s_api.patch_namespaced_config_map(
        name=name, namespace=self.namespace, body=body
    )
    logger.debug("Config map `{}` was patched".format(name))
    return resp
def create_or_update_config_map(self, name, body, reraise=False):
    """Create the config map, falling back to a patch when creation fails.

    NOTE: unlike the other create_or_update_* helpers this returns the
    response alone, not a (response, created) tuple.
    """
    try:
        return self.create_config_map(name=name, body=body)
    except ApiException:
        try:
            return self.update_config_map(name=name, body=body)
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.error("K8S error: {}".format(e))
def create_secret(self, name, body):
    """Create a secret; `name` is only used for logging."""
    resp = self.k8s_api.create_namespaced_secret(
        namespace=self.namespace, body=body
    )
    logger.debug("Secret `{}` was created".format(name))
    return resp
def update_secret(self, name, body):
    """Patch an existing secret."""
    resp = self.k8s_api.patch_namespaced_secret(
        name=name, namespace=self.namespace, body=body
    )
    logger.debug("Secret `{}` was patched".format(name))
    return resp
def create_or_update_secret(self, name, body, reraise=False):
    """Create the secret, falling back to a patch when creation fails.

    Returns (response, created); None on double failure unless `reraise`.
    """
    try:
        return self.create_secret(name=name, body=body), True
    except ApiException:
        try:
            return self.update_secret(name=name, body=body), False
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.error("K8S error: {}".format(e))
def create_service(self, name, body):
    """Create a service; `name` is only used for logging."""
    resp = self.k8s_api.create_namespaced_service(
        namespace=self.namespace, body=body
    )
    logger.debug("Service `{}` was created".format(name))
    return resp
def update_service(self, name, body):
    """Patch an existing service."""
    resp = self.k8s_api.patch_namespaced_service(
        name=name, namespace=self.namespace, body=body
    )
    logger.debug("Service `{}` was patched".format(name))
    return resp
def create_or_update_service(self, name, body, reraise=False):
    """Create the service, falling back to a patch when creation fails.

    Returns (response, created); None on double failure unless `reraise`.
    """
    try:
        return self.create_service(name=name, body=body), True
    except ApiException:
        try:
            return self.update_service(name=name, body=body), False
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.error("K8S error: {}".format(e))
def create_pod(self, name, body):
    """Create a pod; `name` is only used for logging."""
    resp = self.k8s_api.create_namespaced_pod(namespace=self.namespace, body=body)
    logger.debug("Pod `{}` was created".format(name))
    return resp
def update_pod(self, name, body):
    """Patch an existing pod."""
    resp = self.k8s_api.patch_namespaced_pod(
        name=name, namespace=self.namespace, body=body
    )
    logger.debug("Pod `{}` was patched".format(name))
    return resp
def create_or_update_pod(self, name, body, reraise=False):
    """Create the pod, falling back to a patch when creation fails.

    Returns (response, created); None on double failure unless `reraise`.
    """
    try:
        return self.create_pod(name=name, body=body), True
    except ApiException:
        try:
            return self.update_pod(name=name, body=body), False
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.error("K8S error: {}".format(e))
def create_job(self, name, body):
    """Create a batch job; `name` is only used for logging."""
    resp = self.k8s_batch_api.create_namespaced_job(
        namespace=self.namespace, body=body
    )
    logger.debug("Job `{}` was created".format(name))
    return resp
def update_job(self, name, body):
    """Patch an existing batch job."""
    resp = self.k8s_batch_api.patch_namespaced_job(
        name=name, namespace=self.namespace, body=body
    )
    logger.debug("Job `{}` was patched".format(name))
    return resp
def create_or_update_job(self, name, body, reraise=False):
try:
return self.create_job(name=name, body=body), True
except ApiException:
try:
return self.update_job(name=name, body=body), False
except ApiException as e:
if reraise:
raise PolyaxonK8SError("Connection error: %s" % e) from e
else:
logger.error("K8S error: {}".format(e))
    def create_custom_object(self, name, group, version, plural, body):
        """Create a namespaced custom object (CRD instance).

        `name` is used only for logging; the object's actual name comes
        from the metadata inside `body`.
        """
        resp = self.k8s_custom_object_api.create_namespaced_custom_object(
            group=group,
            version=version,
            plural=plural,
            namespace=self.namespace,
            body=body,
        )
        logger.debug("Custom object `{}` was created".format(name))
        return resp

    def update_custom_object(self, name, group, version, plural, body):
        """Patch the existing custom object `name` and return the API response."""
        resp = self.k8s_custom_object_api.patch_namespaced_custom_object(
            name=name,
            group=group,
            version=version,
            plural=plural,
            namespace=self.namespace,
            body=body,
        )
        logger.debug("Custom object `{}` was patched".format(name))
        return resp

    def create_or_update_custom_object(
        self, name, group, version, plural, body, reraise=False
    ):
        """Create the custom object, falling back to a patch when creation fails.

        Returns (api_response, created_flag). When both calls fail, either
        raises PolyaxonK8SError carrying both errors (reraise=True) or logs
        the patch error and returns None.
        """
        try:
            return (
                self.create_custom_object(
                    name=name, group=group, version=version, plural=plural, body=body
                ),
                True,
            )
        except ApiException as e_create:
            try:
                return (
                    self.update_custom_object(
                        name=name,
                        group=group,
                        version=version,
                        plural=plural,
                        body=body,
                    ),
                    False,
                )
            except ApiException as e:
                if reraise:
                    # Surface both failures so the caller can tell whether the
                    # create or the patch (or both) went wrong.
                    raise PolyaxonK8SError(
                        "Connection error: creation %s - update %s" % (e_create, e)
                    ) from e
                else:
                    logger.error("K8S error: {}".format(e))
def create_deployment(self, name, body):
resp = self.k8s_apps_api.create_namespaced_deployment(
namespace=self.namespace, body=body
)
logger.debug("Deployment `{}` was created".format(name))
return resp
def update_deployment(self, name, body):
resp = self.k8s_apps_api.patch_namespaced_deployment(
name=name, namespace=self.namespace, body=body
)
logger.debug("Deployment `{}` was patched".format(name))
return resp
def create_or_update_deployment(self, name, body, reraise=False):
try:
return self.create_deployment(name=name, body=body), True
except ApiException:
try:
return self.update_deployment(name=name, body=body), False
except ApiException as e:
if reraise:
raise PolyaxonK8SError("Connection error: %s" % e) from e
else:
logger.error("K8S error: {}".format(e))
    def create_volume(self, name, body):
        """Create a cluster-scoped persistent volume.

        `name` is used only for logging; the PV name comes from `body`.
        """
        resp = self.k8s_api.create_persistent_volume(body=body)
        logger.debug("Persistent volume `{}` was created".format(name))
        return resp

    def update_volume(self, name, body):
        """Patch the existing persistent volume `name` and return the API response."""
        resp = self.k8s_api.patch_persistent_volume(name=name, body=body)
        logger.debug("Persistent volume `{}` was patched".format(name))
        return resp
def create_or_update_volume(self, name, body, reraise=False):
try:
return self.create_volume(name=name, body=body), True
except ApiException:
try:
return self.update_service(name=name, body=body), False
except ApiException as e:
if reraise:
raise PolyaxonK8SError("Connection error: %s" % e) from e
else:
logger.error("K8S error: {}".format(e))
    def create_volume_claim(self, name, body):
        """Create a persistent volume claim in the manager's namespace.

        `name` is used only for logging; the PVC name comes from `body`.
        """
        resp = self.k8s_api.create_namespaced_persistent_volume_claim(
            namespace=self.namespace, body=body
        )
        logger.debug("Volume claim `{}` was created".format(name))
        return resp

    def update_volume_claim(self, name, body):
        """Patch the existing volume claim `name` and return the API response."""
        resp = self.k8s_api.patch_namespaced_persistent_volume_claim(
            name=name, namespace=self.namespace, body=body
        )
        logger.debug("Volume claim `{}` was patched".format(name))
        return resp

    def create_or_update_volume_claim(self, name, body, reraise=False):
        """Create the volume claim, falling back to a patch when creation fails.

        Returns (api_response, created_flag). When both calls fail, either
        raises PolyaxonK8SError (reraise=True) or logs the error and returns None.
        """
        try:
            return self.create_volume_claim(name=name, body=body), True
        except ApiException:
            try:
                return self.update_volume_claim(name=name, body=body), False
            except ApiException as e:
                if reraise:
                    raise PolyaxonK8SError("Connection error: %s" % e) from e
                else:
                    logger.error("K8S error: {}".format(e))
    def create_ingress(self, name, body):
        """Create an ingress in the manager's namespace and return the API response.

        NOTE(review): the log message uses lowercase "ingress", unlike the
        other create methods — cosmetic inconsistency, left as-is here.
        """
        resp = self.networking_v1_beta1_api.create_namespaced_ingress(
            namespace=self.namespace, body=body
        )
        logger.debug("ingress `{}` was created".format(name))
        return resp

    def update_ingress(self, name, body):
        """Patch the existing ingress `name` and return the API response."""
        resp = self.networking_v1_beta1_api.patch_namespaced_ingress(
            name=name, namespace=self.namespace, body=body
        )
        logger.debug("Ingress `{}` was patched".format(name))
        return resp

    def create_or_update_ingress(self, name, body, reraise=False):
        """Create the ingress, falling back to a patch when creation fails.

        Returns (api_response, created_flag). When both calls fail, either
        raises PolyaxonK8SError (reraise=True) or logs the error and returns None.
        """
        try:
            return self.create_ingress(name=name, body=body), True
        except ApiException:
            try:
                return self.update_ingress(name=name, body=body), False
            except ApiException as e:
                if reraise:
                    raise PolyaxonK8SError("Connection error: %s" % e) from e
                else:
                    logger.error("K8S error: {}".format(e))
    def get_config_map(self, name, reraise=False):
        """Read the config map `name`; return None on API error unless `reraise`."""
        try:
            return self.k8s_api.read_namespaced_config_map(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_secret(self, name, reraise=False):
        """Read the secret `name`; return None on API error unless `reraise`."""
        try:
            return self.k8s_api.read_namespaced_secret(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_service(self, name, reraise=False):
        """Read the service `name`; return None on API error unless `reraise`."""
        try:
            return self.k8s_api.read_namespaced_service(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_pod(self, name, reraise=False):
        """Read the pod `name`; return None on API error unless `reraise`."""
        try:
            return self.k8s_api.read_namespaced_pod(name=name, namespace=self.namespace)
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_job(self, name, reraise=False):
        """Read the batch job `name`; return None on API error unless `reraise`."""
        try:
            return self.k8s_batch_api.read_namespaced_job(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_custom_object(self, name, group, version, plural):
        """Read the custom object `name`.

        NOTE(review): unlike the other getters this does not swallow
        ApiException — a missing object propagates to the caller.
        """
        return self.k8s_custom_object_api.get_namespaced_custom_object(
            name=name,
            group=group,
            version=version,
            plural=plural,
            namespace=self.namespace,
        )

    def get_deployment(self, name, reraise=False):
        """Read the deployment `name`; return None on API error unless `reraise`."""
        try:
            return self.k8s_apps_api.read_namespaced_deployment(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_volume(self, name, reraise=False):
        """Read the cluster-scoped persistent volume `name`; None on error unless `reraise`."""
        try:
            return self.k8s_api.read_persistent_volume(name=name)
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_volume_claim(self, name, reraise=False):
        """Read the volume claim `name`; return None on API error unless `reraise`."""
        try:
            return self.k8s_api.read_namespaced_persistent_volume_claim(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None

    def get_ingress(self, name, reraise=False):
        """Read the ingress `name`; return None on API error unless `reraise`."""
        try:
            return self.networking_v1_beta1_api.read_namespaced_ingress(
                name=name, namespace=self.namespace
            )
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            return None
    def delete_config_map(self, name, reraise=False):
        """Delete the config map `name`; treat API errors as "not found" unless `reraise`."""
        try:
            self.k8s_api.delete_namespaced_config_map(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(api_version=constants.K8S_API_VERSION_V1),
            )
            logger.debug("Config map `{}` Deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("Config map `{}` was not found".format(name))

    def delete_secret(self, name, reraise=False):
        """Delete the secret `name`; treat API errors as "not found" unless `reraise`."""
        try:
            self.k8s_api.delete_namespaced_secret(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(api_version=constants.K8S_API_VERSION_V1),
            )
            logger.debug("secret `{}` Deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("secret `{}` was not found".format(name))

    def delete_service(self, name, reraise=False):
        """Delete the service `name`; treat API errors as "not found" unless `reraise`."""
        try:
            self.k8s_api.delete_namespaced_service(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(api_version=constants.K8S_API_VERSION_V1),
            )
            logger.debug("Service `{}` deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("Service `{}` was not found".format(name))

    def delete_pod(self, name, reraise=False):
        """Delete the pod `name`; treat API errors as "not found" unless `reraise`."""
        try:
            self.k8s_api.delete_namespaced_pod(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(api_version=constants.K8S_API_VERSION_V1),
            )
            logger.debug("Pod `{}` deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("Pod `{}` was not found".format(name))
def delete_job(self, name, reraise=False):
try:
self.k8s_batch_api.delete_namespaced_job(
name=name,
namespace=self.namespace,
body=client.V1DeleteOptions(api_version=constants.K8S_API_VERSION_V1),
)
logger.debug("Pod `{}` deleted".format(name))
except ApiException as e:
if reraise:
raise PolyaxonK8SError("Connection error: %s" % e) from e
else:
logger.debug("Pod `{}` was not found".format(name))
    def delete_custom_object(self, name, group, version, plural):
        """Delete the custom object `name`.

        NOTE(review): unlike the other delete methods this does not swallow
        ApiException — a failed deletion propagates to the caller.
        """
        self.k8s_custom_object_api.delete_namespaced_custom_object(
            name=name,
            group=group,
            version=version,
            plural=plural,
            namespace=self.namespace,
            body=client.V1DeleteOptions(),
        )
        logger.debug("Custom object `{}` deleted".format(name))

    def delete_deployment(self, name, reraise=False):
        """Delete the deployment `name` with foreground cascading deletion.

        Treats API errors as "not found" unless `reraise` is True.
        """
        try:
            self.k8s_apps_api.delete_namespaced_deployment(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(
                    api_version=constants.K8S_API_VERSION_APPS_V1,
                    # Foreground: dependents (replica sets/pods) are deleted first.
                    propagation_policy="Foreground",
                ),
            )
            logger.debug("Deployment `{}` deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("Deployment `{}` was not found".format(name))

    def delete_volume(self, name, reraise=False):
        """Delete the persistent volume `name`; errors treated as "not found" unless `reraise`."""
        try:
            self.k8s_api.delete_persistent_volume(
                name=name,
                body=client.V1DeleteOptions(api_version=constants.K8S_API_VERSION_V1),
            )
            logger.debug("Volume `{}` Deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("Volume `{}` was not found".format(name))

    def delete_volume_claim(self, name, reraise=False):
        """Delete the volume claim `name`; errors treated as "not found" unless `reraise`."""
        try:
            self.k8s_api.delete_namespaced_persistent_volume_claim(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(api_version=constants.K8S_API_VERSION_V1),
            )
            logger.debug("Volume claim `{}` Deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("Volume claim `{}` was not found".format(name))

    def delete_ingress(self, name, reraise=False):
        """Delete the ingress `name` with foreground cascading deletion.

        Treats API errors as "not found" unless `reraise` is True.
        """
        try:
            self.networking_v1_beta1_api.delete_namespaced_ingress(
                name=name,
                namespace=self.namespace,
                body=client.V1DeleteOptions(
                    api_version=constants.K8S_API_VERSION_NETWORKING_V1_BETA1,
                    propagation_policy="Foreground",
                ),
            )
            logger.debug("Ingress `{}` deleted".format(name))
        except ApiException as e:
            if reraise:
                raise PolyaxonK8SError("Connection error: %s" % e) from e
            else:
                logger.debug("Ingress `{}` was not found".format(name))
    def delete_pods(self, labels, include_uninitialized=True, reraise=False):
        """Delete every pod matching the `labels` selector."""
        objs = self.list_pods(
            labels=labels, include_uninitialized=include_uninitialized, reraise=reraise
        )
        for obj in objs:
            self.delete_pod(name=obj.metadata.name, reraise=reraise)

    def delete_jobs(self, labels, include_uninitialized=True, reraise=False):
        """Delete every batch job matching the `labels` selector."""
        objs = self.list_jobs(
            labels=labels, include_uninitialized=include_uninitialized, reraise=reraise
        )
        for obj in objs:
            self.delete_job(name=obj.metadata.name, reraise=reraise)

    def delete_services(self, labels, reraise=False):
        """Delete every service matching the `labels` selector."""
        objs = self.list_services(labels=labels, reraise=reraise)
        for obj in objs:
            self.delete_service(name=obj.metadata.name, reraise=reraise)

    def delete_deployments(self, labels, reraise=False):
        """Delete every deployment matching the `labels` selector."""
        objs = self.list_deployments(labels=labels, reraise=reraise)
        for obj in objs:
            self.delete_deployment(name=obj.metadata.name, reraise=reraise)
def delete_ingresses(self, labels, reraise=False):
objs = self.list_services(labels=labels, reraise=reraise)
for obj in objs:
self.delete_ingress(name=obj.metadata.name, reraise=reraise)
|
gregmbi/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/models/v1_environment.py | <reponame>gregmbi/polyaxon<filename>sdks/python/http_client/v1/polyaxon_sdk/models/v1_environment.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1Environment(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"labels": "dict(str, str)",
"annotations": "dict(str, str)",
"node_selector": "dict(str, str)",
"affinity": "V1Affinity",
"tolerations": "list[V1Toleration]",
"node_name": "str",
"service_account_name": "str",
"host_aliases": "list[V1HostAlias]",
"security_context": "V1PodSecurityContext",
"image_pull_secrets": "list[str]",
"host_network": "bool",
"dns_policy": "str",
"dns_config": "V1PodDNSConfig",
"scheduler_name": "str",
"priority_class_name": "str",
"priority": "int",
"restart_policy": "str",
}
attribute_map = {
"labels": "labels",
"annotations": "annotations",
"node_selector": "node_selector",
"affinity": "affinity",
"tolerations": "tolerations",
"node_name": "node_name",
"service_account_name": "service_account_name",
"host_aliases": "host_aliases",
"security_context": "security_context",
"image_pull_secrets": "image_pull_secrets",
"host_network": "host_network",
"dns_policy": "dns_policy",
"dns_config": "dns_config",
"scheduler_name": "scheduler_name",
"priority_class_name": "priority_class_name",
"priority": "priority",
"restart_policy": "restart_policy",
}
def __init__(
self,
labels=None,
annotations=None,
node_selector=None,
affinity=None,
tolerations=None,
node_name=None,
service_account_name=None,
host_aliases=None,
security_context=None,
image_pull_secrets=None,
host_network=None,
dns_policy=None,
dns_config=None,
scheduler_name=None,
priority_class_name=None,
priority=None,
restart_policy=None,
local_vars_configuration=None,
): # noqa: E501
"""V1Environment - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._labels = None
self._annotations = None
self._node_selector = None
self._affinity = None
self._tolerations = None
self._node_name = None
self._service_account_name = None
self._host_aliases = None
self._security_context = None
self._image_pull_secrets = None
self._host_network = None
self._dns_policy = None
self._dns_config = None
self._scheduler_name = None
self._priority_class_name = None
self._priority = None
self._restart_policy = None
self.discriminator = None
if labels is not None:
self.labels = labels
if annotations is not None:
self.annotations = annotations
if node_selector is not None:
self.node_selector = node_selector
if affinity is not None:
self.affinity = affinity
if tolerations is not None:
self.tolerations = tolerations
if node_name is not None:
self.node_name = node_name
if service_account_name is not None:
self.service_account_name = service_account_name
if host_aliases is not None:
self.host_aliases = host_aliases
if security_context is not None:
self.security_context = security_context
if image_pull_secrets is not None:
self.image_pull_secrets = image_pull_secrets
if host_network is not None:
self.host_network = host_network
if dns_policy is not None:
self.dns_policy = dns_policy
if dns_config is not None:
self.dns_config = dns_config
if scheduler_name is not None:
self.scheduler_name = scheduler_name
if priority_class_name is not None:
self.priority_class_name = priority_class_name
if priority is not None:
self.priority = priority
if restart_policy is not None:
self.restart_policy = restart_policy
@property
def labels(self):
"""Gets the labels of this V1Environment. # noqa: E501
:return: The labels of this V1Environment. # noqa: E501
:rtype: dict(str, str)
"""
return self._labels
@labels.setter
def labels(self, labels):
"""Sets the labels of this V1Environment.
:param labels: The labels of this V1Environment. # noqa: E501
:type: dict(str, str)
"""
self._labels = labels
@property
def annotations(self):
"""Gets the annotations of this V1Environment. # noqa: E501
:return: The annotations of this V1Environment. # noqa: E501
:rtype: dict(str, str)
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""Sets the annotations of this V1Environment.
:param annotations: The annotations of this V1Environment. # noqa: E501
:type: dict(str, str)
"""
self._annotations = annotations
@property
def node_selector(self):
"""Gets the node_selector of this V1Environment. # noqa: E501
:return: The node_selector of this V1Environment. # noqa: E501
:rtype: dict(str, str)
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""Sets the node_selector of this V1Environment.
:param node_selector: The node_selector of this V1Environment. # noqa: E501
:type: dict(str, str)
"""
self._node_selector = node_selector
@property
def affinity(self):
"""Gets the affinity of this V1Environment. # noqa: E501
:return: The affinity of this V1Environment. # noqa: E501
:rtype: V1Affinity
"""
return self._affinity
@affinity.setter
def affinity(self, affinity):
"""Sets the affinity of this V1Environment.
:param affinity: The affinity of this V1Environment. # noqa: E501
:type: V1Affinity
"""
self._affinity = affinity
@property
def tolerations(self):
"""Gets the tolerations of this V1Environment. # noqa: E501
Optional Tolerations to apply. # noqa: E501
:return: The tolerations of this V1Environment. # noqa: E501
:rtype: list[V1Toleration]
"""
return self._tolerations
@tolerations.setter
def tolerations(self, tolerations):
"""Sets the tolerations of this V1Environment.
Optional Tolerations to apply. # noqa: E501
:param tolerations: The tolerations of this V1Environment. # noqa: E501
:type: list[V1Toleration]
"""
self._tolerations = tolerations
@property
def node_name(self):
"""Gets the node_name of this V1Environment. # noqa: E501
Optional NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. # noqa: E501
:return: The node_name of this V1Environment. # noqa: E501
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""Sets the node_name of this V1Environment.
Optional NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements. # noqa: E501
:param node_name: The node_name of this V1Environment. # noqa: E501
:type: str
"""
self._node_name = node_name
@property
def service_account_name(self):
"""Gets the service_account_name of this V1Environment. # noqa: E501
:return: The service_account_name of this V1Environment. # noqa: E501
:rtype: str
"""
return self._service_account_name
@service_account_name.setter
def service_account_name(self, service_account_name):
"""Sets the service_account_name of this V1Environment.
:param service_account_name: The service_account_name of this V1Environment. # noqa: E501
:type: str
"""
self._service_account_name = service_account_name
@property
def host_aliases(self):
"""Gets the host_aliases of this V1Environment. # noqa: E501
Optional HostAliases is an optional list of hosts and IPs that will be injected into the pod spec. # noqa: E501
:return: The host_aliases of this V1Environment. # noqa: E501
:rtype: list[V1HostAlias]
"""
return self._host_aliases
@host_aliases.setter
def host_aliases(self, host_aliases):
"""Sets the host_aliases of this V1Environment.
Optional HostAliases is an optional list of hosts and IPs that will be injected into the pod spec. # noqa: E501
:param host_aliases: The host_aliases of this V1Environment. # noqa: E501
:type: list[V1HostAlias]
"""
self._host_aliases = host_aliases
@property
def security_context(self):
"""Gets the security_context of this V1Environment. # noqa: E501
:return: The security_context of this V1Environment. # noqa: E501
:rtype: V1PodSecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1Environment.
:param security_context: The security_context of this V1Environment. # noqa: E501
:type: V1PodSecurityContext
"""
self._security_context = security_context
@property
def image_pull_secrets(self):
"""Gets the image_pull_secrets of this V1Environment. # noqa: E501
:return: The image_pull_secrets of this V1Environment. # noqa: E501
:rtype: list[str]
"""
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
"""Sets the image_pull_secrets of this V1Environment.
:param image_pull_secrets: The image_pull_secrets of this V1Environment. # noqa: E501
:type: list[str]
"""
self._image_pull_secrets = image_pull_secrets
@property
def host_network(self):
"""Gets the host_network of this V1Environment. # noqa: E501
Host networking requested for this workflow pod. Default to false. # noqa: E501
:return: The host_network of this V1Environment. # noqa: E501
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""Sets the host_network of this V1Environment.
Host networking requested for this workflow pod. Default to false. # noqa: E501
:param host_network: The host_network of this V1Environment. # noqa: E501
:type: bool
"""
self._host_network = host_network
@property
def dns_policy(self):
"""Gets the dns_policy of this V1Environment. # noqa: E501
Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501
:return: The dns_policy of this V1Environment. # noqa: E501
:rtype: str
"""
return self._dns_policy
@dns_policy.setter
def dns_policy(self, dns_policy):
"""Sets the dns_policy of this V1Environment.
Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. # noqa: E501
:param dns_policy: The dns_policy of this V1Environment. # noqa: E501
:type: str
"""
self._dns_policy = dns_policy
@property
def dns_config(self):
"""Gets the dns_config of this V1Environment. # noqa: E501
:return: The dns_config of this V1Environment. # noqa: E501
:rtype: V1PodDNSConfig
"""
return self._dns_config
@dns_config.setter
def dns_config(self, dns_config):
"""Sets the dns_config of this V1Environment.
:param dns_config: The dns_config of this V1Environment. # noqa: E501
:type: V1PodDNSConfig
"""
self._dns_config = dns_config
@property
def scheduler_name(self):
"""Gets the scheduler_name of this V1Environment. # noqa: E501
:return: The scheduler_name of this V1Environment. # noqa: E501
:rtype: str
"""
return self._scheduler_name
@scheduler_name.setter
def scheduler_name(self, scheduler_name):
"""Sets the scheduler_name of this V1Environment.
:param scheduler_name: The scheduler_name of this V1Environment. # noqa: E501
:type: str
"""
self._scheduler_name = scheduler_name
@property
def priority_class_name(self):
"""Gets the priority_class_name of this V1Environment. # noqa: E501
If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
:return: The priority_class_name of this V1Environment. # noqa: E501
:rtype: str
"""
return self._priority_class_name
@priority_class_name.setter
def priority_class_name(self, priority_class_name):
"""Sets the priority_class_name of this V1Environment.
If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
:param priority_class_name: The priority_class_name of this V1Environment. # noqa: E501
:type: str
"""
self._priority_class_name = priority_class_name
@property
def priority(self):
"""Gets the priority of this V1Environment. # noqa: E501
The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501
:return: The priority of this V1Environment. # noqa: E501
:rtype: int
"""
return self._priority
@priority.setter
def priority(self, priority):
"""Sets the priority of this V1Environment.
The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. # noqa: E501
:param priority: The priority of this V1Environment. # noqa: E501
:type: int
"""
self._priority = priority
@property
def restart_policy(self):
"""Gets the restart_policy of this V1Environment. # noqa: E501
:return: The restart_policy of this V1Environment. # noqa: E501
:rtype: str
"""
return self._restart_policy
@restart_policy.setter
def restart_policy(self, restart_policy):
"""Sets the restart_policy of this V1Environment.
:param restart_policy: The restart_policy of this V1Environment. # noqa: E501
:type: str
"""
self._restart_policy = restart_policy
    def to_dict(self):
        """Returns the model properties as a dict.

        Nested model objects (anything exposing `to_dict`) are serialized
        recursively, including items inside lists and values inside dicts.
        """
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize model items inside lists; plain values pass through.
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model values inside dicts; plain items pass through.
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value
        return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Environment):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Environment):
return True
return self.to_dict() != other.to_dict()
|
gregmbi/polyaxon | core/polyaxon/sidecar/outputs/artifacts.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from datetime import datetime
from typing import Optional
from polyaxon.containers.contexts import CONTEXT_MOUNT_ARTIFACTS_FORMAT
from polyaxon.stores.manager import get_artifacts_connection, upload_file_or_dir
from polyaxon.utils.tz_utils import now
def sync_artifacts(last_check: Optional[datetime], run_uuid: str):
    """Upload run artifacts modified since `last_check` to the artifacts store.

    The returned timestamp is captured *before* the upload starts, so files
    written while the sync is running are picked up by the next invocation.
    """
    check_started_at = now()
    connection_type = get_artifacts_connection()
    path_from = CONTEXT_MOUNT_ARTIFACTS_FORMAT.format(run_uuid)
    # Nothing to sync if the run has produced no artifacts yet.
    if not os.path.exists(path_from):
        return check_started_at
    path_to = os.path.join(connection_type.store_path, run_uuid)
    upload_file_or_dir(
        path_from=path_from,
        path_to=path_to,
        is_file=False,
        workers=5,
        last_time=last_check,
        connection_type=connection_type,
    )
    return check_started_at
|
gregmbi/polyaxon | core/tests/test_polyflow/test_conds.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from marshmallow import ValidationError
from tests.utils import BaseTestCase
from polyaxon.polyflow.conditions import ConditionSchema, V1IoCond, V1StatusCond
@pytest.mark.polyflow_mark
class TestCondsConfigs(BaseTestCase):
    """Validation tests for polyflow condition schemas (status and io conds)."""

    def test_status_cond(self):
        # Unknown field "foo" must be rejected.
        config_dict = {"foo": "bar", "operation": "foo", "trigger": "done"}
        with self.assertRaises(ValidationError):
            V1StatusCond.from_dict(config_dict)
        # Wrong kind value must be rejected.
        config_dict = {"kind": "foo", "operation": "foo", "trigger": "done"}
        with self.assertRaises(ValidationError):
            V1StatusCond.from_dict(config_dict)
        # kind is optional; omitting it is valid.
        config_dict = {"operation": "foo", "trigger": "done"}
        V1StatusCond.from_dict(config_dict)
        # Explicit correct kind is valid.
        config_dict = {"kind": "status", "operation": "foo", "trigger": "done"}
        V1StatusCond.from_dict(config_dict)

    def test_io_cond(self):
        # "operation" is not a valid field for io conds.
        config_dict = {
            "operation": "foo",
            "param": "done",
            "trigger": ["op1.done", "foo"],
        }
        with self.assertRaises(ValidationError):
            V1IoCond.from_dict(config_dict)
        # List triggers are rejected.
        config_dict = {"kind": "io", "param": "done", "trigger": ["foo"]}
        with self.assertRaises(ValidationError):
            V1IoCond.from_dict(config_dict)
        # Wrong kind value must be rejected.
        config_dict = {"kind": "foo", "param": "done", "trigger": "true"}
        with self.assertRaises(ValidationError):
            V1IoCond.from_dict(config_dict)
        # "outputs" is not a valid kind for io conds.
        config_dict = {"kind": "outputs", "param": "foo", "trigger": "done"}
        with self.assertRaises(ValidationError):
            V1IoCond.from_dict(config_dict)
        # List triggers are rejected even without a kind.
        config_dict = {"param": "name", "trigger": ["true"]}
        with self.assertRaises(ValidationError):
            V1IoCond.from_dict(config_dict)
        # Valid io cond referencing an upstream output param.
        config_dict1 = {"param": "build.outputs.image", "trigger": "value1"}
        V1IoCond.from_dict(config_dict1)
        # Valid io cond with a negated multi-value trigger expression.
        config_dict2 = {
            "kind": "io",
            "param": "build.outputs.image",
            "trigger": "~value1|value2|value3",
        }
        V1IoCond.from_dict(config_dict2)

    def test_conds(self):
        # A polymorphic list containing both cond kinds loads via the schema.
        configs = [
            {"kind": "status", "operation": "foo", "trigger": "done"},
            {"kind": "io", "param": "foo.outputs.param1", "trigger": "~value1|value2"},
        ]
        ConditionSchema().load(configs, many=True)
|
gregmbi/polyaxon | core/tests/test_proxies/test_gateway/test_base.py | <filename>core/tests/test_proxies/test_gateway/test_base.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tests.utils import BaseTestCase
from polyaxon import settings
from polyaxon.proxies.schemas.gateway import get_base_config
@pytest.mark.proxies_mark
class TestGatewayBase(BaseTestCase):
    """Golden-output tests for the gateway nginx base configuration."""

    # Ask the base test case to (re)initialize the proxies settings fixture.
    SET_PROXIES_SETTINGS = True

    def test_gateway_base_config(self):
        """Base config with auth and DNS resolver disabled: no auth_request
        or resolver directives should appear in the rendered output."""
        expected = """
listen 80;
error_log /polyaxon/logs/error.log warn;
gzip on;
gzip_disable "msie6";
gzip_types *;
charset utf-8;
client_max_body_size 4G;
client_body_buffer_size 50m;
client_body_in_file_only clean;
sendfile on;
send_timeout 600;
keepalive_timeout 600;
uwsgi_read_timeout 600;
uwsgi_send_timeout 600;
client_header_timeout 600;
proxy_read_timeout 600;
error_page 500 502 503 504 /50x.html;
error_page 401 403 /permission.html;
error_page 404 /404.html;
location /streams/ {
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
rewrite_log on;
rewrite ^/services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location / {
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        settings.PROXIES_CONFIG.auth_enabled = False
        settings.PROXIES_CONFIG.dns_use_resolver = False
        assert get_base_config() == expected

    def test_gateway_base_config_with_auth_and_dns(self):
        """Base config with auth and a custom DNS resolver enabled: every
        proxied location gains auth_request/resolver directives and an
        internal /auth/v1/ subrequest location is added."""
        expected = """
listen 80;
error_log /polyaxon/logs/error.log warn;
gzip on;
gzip_disable "msie6";
gzip_types *;
charset utf-8;
client_max_body_size 4G;
client_body_buffer_size 50m;
client_body_in_file_only clean;
sendfile on;
send_timeout 600;
keepalive_timeout 600;
uwsgi_read_timeout 600;
uwsgi_send_timeout 600;
client_header_timeout 600;
proxy_read_timeout 600;
error_page 500 502 503 504 /50x.html;
error_page 401 403 /permission.html;
error_page 404 /404.html;
location = /auth/v1/ {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Origin-URI $request_uri;
proxy_set_header X-Origin-Method $request_method;
internal;
}
location /streams/ {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-streams;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location ~ /rewrite-services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) {
auth_request /auth/v1/;
auth_request_set $auth_status $upstream_status;
resolver coredns.kube-system.svc.cluster.local valid=5s;
rewrite_log on;
rewrite ^/services/v1/([-_.:\w]+)/([-_.:\w]+)/([-_.:\w]+)/runs/([-_.:\w]+)/(.*) /$5 break;
proxy_pass http://plx-operation-$4.$1.svc.cluster.local;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
location / {
resolver coredns.kube-system.svc.cluster.local valid=5s;
proxy_pass http://polyaxon-polyaxon-api;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "";
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_buffering off;
}
"""  # noqa
        settings.PROXIES_CONFIG.auth_enabled = True
        settings.PROXIES_CONFIG.auth_use_resolver = True
        settings.PROXIES_CONFIG.dns_use_resolver = True
        settings.PROXIES_CONFIG.dns_prefix = "coredns.kube-system"
        settings.PROXIES_CONFIG.dns_custom_cluster = "cluster.local"
        assert get_base_config() == expected
|
gregmbi/polyaxon | core/polyaxon/polyflow/component/component_reference.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.polyflow.component.component import ComponentSchema, V1Component
from polyaxon.polyflow.references import (
DagReferenceSchema,
HubReferenceSchema,
PathReferenceSchema,
UrlReferenceSchema,
V1DagReference,
V1HubReference,
V1PathReference,
V1UrlReference,
)
from polyaxon.schemas.base import BaseOneOfSchema
class ComponentReferenceSchema(BaseOneOfSchema):
    """Polymorphic schema dispatching on the payload's ``kind`` field.

    Resolves to an inline component or to one of the reference kinds
    (dag, hub, path, url).
    """

    # Discriminator field used to select the concrete sub-schema.
    TYPE_FIELD = "kind"
    # Keep the kind field in the loaded payload instead of stripping it.
    TYPE_FIELD_REMOVE = False
    # Mapping from kind identifier to the schema that handles it.
    SCHEMAS = {
        V1Component.IDENTIFIER: ComponentSchema,
        V1DagReference.IDENTIFIER: DagReferenceSchema,
        V1HubReference.IDENTIFIER: HubReferenceSchema,
        V1PathReference.IDENTIFIER: PathReferenceSchema,
        V1UrlReference.IDENTIFIER: UrlReferenceSchema,
    }
|
gregmbi/polyaxon | core/polyaxon/schemas/fields/indexed_dict.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Mapping
from marshmallow import fields
class IndexedDict(fields.Dict):
    """A marshmallow Dict field that additionally requires an ``index`` key."""

    def _validated(self, value):
        """Check the value is a mapping with an index or raise a :exc:`ValidationError`.

        Bug fix: the original condition used ``or`` — it accepted any Mapping
        even without an "index" key, and evaluated ``"index" in value`` on
        non-mapping values (potential TypeError). ``and`` short-circuits so
        non-mappings fail immediately.
        NOTE(review): ``Mapping`` is imported from ``collections`` at module
        level, which is removed in Python 3.10 — should come from
        ``collections.abc``.
        """
        if not (isinstance(value, Mapping) and "index" in value):
            self.fail("invalid")
|
gregmbi/polyaxon | core/polyaxon/api.py | <filename>core/polyaxon/api.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# API version segment shared by all endpoint prefixes.
VERSION_V1 = "v1"

# URL prefixes for the different Polyaxon endpoint families.
API_V1 = f"api/{VERSION_V1}"
STREAMS_V1 = f"streams/{VERSION_V1}"
SERVICES_V1 = f"services/{VERSION_V1}"
REWRITE_SERVICES_V1 = f"rewrite-services/{VERSION_V1}"
WS_V1 = f"ws/{VERSION_V1}"
AUTH_V1 = f"auth/{VERSION_V1}"

# Default Polyaxon cloud host.
POLYAXON_CLOUD = "cloud.polyaxon.com"
|
gregmbi/polyaxon | core/polyaxon/proxies/schemas/locations.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon import settings
from polyaxon.proxies.schemas.base import get_config
# nginx location template for static assets; ``{{``/``}}`` are literal
# braces escaped for str.format rendering inside get_config.
STATIC_LOCATION_OPTIONS = """
location /static/ {{
alias /polyaxon/static/;
autoindex on;
expires 30d;
add_header Cache-Control private;
}}
"""


def get_static_location_config():
    """Return the rendered nginx location block serving /static/ assets."""
    return get_config(options=STATIC_LOCATION_OPTIONS, indent=0)
# nginx location template for /tmp/; marked ``internal`` so it is only
# reachable via X-Accel-Redirect, not directly by clients.
TMP_LOCATION_OPTIONS = """
location /tmp/ {{
alias /tmp/;
expires 0;
add_header Cache-Control private;
internal;
}}
"""


def get_tmp_location_config():
    """Return the rendered nginx location block serving /tmp/ (internal)."""
    return get_config(options=TMP_LOCATION_OPTIONS, indent=0)
# nginx location template for the archives root; ``{archives_root}`` is
# filled in from the proxies settings at render time.
ARCHIVES_LOCATION_OPTIONS = """
location {archives_root} {{
alias {archives_root};
expires 0;
add_header Cache-Control private;
internal;
}}
"""


def get_archives_root_location_config():
    """Return the rendered nginx location block for the archives root (internal)."""
    return get_config(
        options=ARCHIVES_LOCATION_OPTIONS,
        indent=0,
        # Normalize the configured root to exactly one trailing slash.
        archives_root=settings.PROXIES_CONFIG.archive_root.rstrip("/") + "/",
    )
def get_api_locations_config():
    """Assemble the nginx location blocks served by the API service."""
    return "\n".join(
        [get_static_location_config(), get_tmp_location_config()]
    )
def get_streams_locations_config():
    """Assemble the nginx location blocks served by the streams service."""
    return "\n".join(
        [get_tmp_location_config(), get_archives_root_location_config()]
    )
|
gregmbi/polyaxon | core/tests/test_polyflow/test_environments/test_environments.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tests.utils import BaseTestCase, assert_equal_dict
from polyaxon.polyflow.environment import V1Environment
@pytest.mark.environment_mark
class TestEnvironmentsConfigs(BaseTestCase):
    """Round-trip (from_dict/to_dict) tests for V1Environment."""

    def test_environment_config(self):
        """Incrementally add every environment field and verify that each
        cumulative config dict survives a from_dict/to_dict round trip."""
        # Resources
        config_dict = {"labels": {"foo": "bar"}}
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add node selectors
        config_dict["nodeSelector"] = {"polyaxon.com": "master"}
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add affinity (also checks the snake_case attribute mapping)
        config_dict["affinity"] = {
            "nodeAffinity": {"requiredDuringSchedulingIgnoredDuringExecution": {}}
        }
        config = V1Environment.from_dict(config_dict)
        assert config.affinity.node_affinity == {
            "requiredDuringSchedulingIgnoredDuringExecution": {}
        }
        assert_equal_dict(config_dict, config.to_dict())
        # Add labels
        config_dict["labels"] = {"foo": "bar"}
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add annotations
        config_dict["annotations"] = {"foo": "bar"}
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add toleration
        config_dict["tolerations"] = [{"key": "key", "operator": "Exists"}]
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add service_account
        config_dict["serviceAccountName"] = "service_account_name"
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add image_pull_secrets
        config_dict["imagePullSecrets"] = ["secret1", "secret2"]
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
        # Add security context per job (also checks attribute mapping)
        config_dict["securityContext"] = {"runAsUser": 1000, "runAsGroup": 3000}
        config = V1Environment.from_dict(config_dict)
        assert config.security_context.run_as_user == 1000
        assert config.security_context.run_as_group == 3000
        assert_equal_dict(config_dict, config.to_dict())
        # Add restart_policy
        config_dict["restartPolicy"] = "never"
        config = V1Environment.from_dict(config_dict)
        assert_equal_dict(config_dict, config.to_dict())
|
gregmbi/polyaxon | core/polyaxon/client/run.py | <filename>core/polyaxon/client/run.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from typing import Dict, Iterator, List, Tuple, Union
import click
import polyaxon_sdk
from polyaxon_sdk.rest import ApiException
from urllib3.exceptions import HTTPError
from polyaxon import settings
from polyaxon.cli.errors import handle_cli_error
from polyaxon.client import PolyaxonClient
from polyaxon.client.decorators import check_no_op, check_offline
from polyaxon.env_vars.getters import (
get_project_full_name,
get_project_or_local,
get_run_info,
get_run_or_local,
)
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.lifecycle import LifeCycle, V1StatusCondition
from polyaxon.polyboard.artifacts import V1ArtifactKind, V1RunArtifact
from polyaxon.polyboard.logging.handler import get_logs_handler
from polyaxon.stores.polyaxon_store import PolyaxonStore
from polyaxon.utils.code_reference import get_code_reference
from polyaxon.utils.formatting import Printer
from polyaxon.utils.query_params import get_logs_params, get_query_params
from polyaxon.utils.validation import validate_tags
from polyaxon.utils.hashing import hash_value
class RunClient:
    """Client for a single Polyaxon run.

    Wraps the generated API client to manage a run's metadata, statuses,
    logs, artifacts, and lineage. The owner/project/run context is resolved
    from the arguments or from the current managed/local environment.
    """

    @check_no_op
    def __init__(
        self, owner=None, project=None, run_uuid=None, client=None,
    ):
        """Resolve the run context and build an API client when needed.

        Raises:
            PolyaxonClientException: if no valid owner/project can be resolved.
        """
        try:
            owner, project = get_project_or_local(
                get_project_full_name(owner=owner, project=project)
            )
        except PolyaxonClientException:
            # The managed context below may still provide the information.
            pass
        if project is None:
            if settings.CLIENT_CONFIG.is_managed:
                owner, project, _run_uuid = get_run_info()
                run_uuid = run_uuid or _run_uuid
            else:
                raise PolyaxonClientException("Please provide a valid project.")
        if not owner or not project:
            raise PolyaxonClientException("Please provide a valid project with owner.")

        self.client = client
        if not (self.client or settings.CLIENT_CONFIG.is_offline):
            self.client = PolyaxonClient()

        self._owner = owner
        self._project = project
        self._run_uuid = get_run_or_local(run_uuid)
        self._run_data = polyaxon_sdk.V1Run()
        self._namespace = None

    @property
    def status(self):
        # Last known status from the local cache (not refreshed here).
        return self._run_data.status

    @property
    def namespace(self):
        # Kubernetes namespace of the run, fetched lazily and cached.
        if self._namespace:
            return self._namespace
        self._namespace = self.get_namespace()
        return self._namespace

    @property
    def owner(self):
        return self._owner

    @property
    def project(self):
        return self._project

    @property
    def run_uuid(self):
        return self._run_uuid

    @property
    def run_data(self):
        return self._run_data

    @check_no_op
    def get_inputs(self):
        """
        Returns all the run inputs/params.
        """
        return self._run_data.inputs

    @check_no_op
    def get_outputs(self):
        """
        Returns all the run outputs/metrics.
        """
        # Docstring fix: this returns outputs, not inputs (copy-paste error).
        return self._run_data.outputs

    @check_no_op
    @check_offline
    def refresh_data(self):
        """Reload the run data from the API into the local cache."""
        self._run_data = self.client.runs_v1.get_run(
            self.owner, self.project, self.run_uuid
        )

    @check_no_op
    @check_offline
    def update(self, data: Union[Dict, polyaxon_sdk.V1Run]):
        """Synchronously patch the run with `data`."""
        return self._update(data=data, async_req=False)

    @check_no_op
    @check_offline
    def _update(self, data: Union[Dict, polyaxon_sdk.V1Run], async_req: bool = True):
        """Patch the run; asynchronous by default."""
        self.client.runs_v1.patch_run(
            owner=self.owner,
            project=self.project,
            run_uuid=self.run_uuid,
            body=data,
            async_req=async_req,
        )

    @check_no_op
    @check_offline
    def log_status(self, status, reason=None, message=None):
        """Create a new status condition for the run (async)."""
        status_condition = V1StatusCondition(
            type=status, status=True, reason=reason, message=message
        )
        self.client.runs_v1.create_run_status(
            owner=self.owner,
            project=self.project,
            uuid=self.run_uuid,
            body={"condition": status_condition},
            async_req=True,
        )

    @check_no_op
    @check_offline
    def get_statuses(self, last_status: str = None):
        """Return the current status and the conditions newer than `last_status`.

        Raises:
            PolyaxonClientException: on API/transport errors.
        """
        try:
            response = self.client.runs_v1.get_run_statuses(
                self.owner, self.project, self.run_uuid
            )
            if not last_status:
                return response.status, response.status_conditions
            if last_status == response.status:
                return last_status, []

            # Collect only the conditions that happened after `last_status`,
            # preserving chronological order on yield.
            _conditions = []
            for c in reversed(response.status_conditions):
                if c.type == last_status:
                    break
                _conditions.append(c)
            return response.status, reversed(_conditions)
        except (ApiException, HTTPError) as e:
            raise PolyaxonClientException("Api error: %s" % e) from e

    @check_no_op
    @check_offline
    def watch_statuses(self):
        """Yield (status, conditions) pairs until the run reaches a done state."""

        def watch_run_statuses() -> Tuple[str, Iterator]:
            last_status = None
            while not LifeCycle.is_done(last_status):
                last_status, _conditions = self.get_statuses(last_status)
                yield last_status, _conditions
                time.sleep(settings.CLIENT_CONFIG.watch_interval)

        for status, conditions in watch_run_statuses():
            self._run_data.status = status
            yield status, conditions

    @check_no_op
    @check_offline
    def get_logs(self, last_file=None, last_time=None):
        """Fetch one page of logs starting after `last_file`/`last_time`."""
        params = get_logs_params(last_file=last_file, last_time=last_time)
        return self.client.runs_v1.get_run_logs(
            self.namespace, self.owner, self.project, self.run_uuid, **params
        )

    @check_no_op
    @check_offline
    def watch_logs(self, hide_time: bool = False, all_info: bool = False):
        """Stream the run logs until the run is done."""
        return get_run_logs(
            client=self, hide_time=hide_time, all_info=all_info, follow=True
        )

    @check_no_op
    @check_offline
    def get_events(self, kind: V1ArtifactKind, names: List[str], orient: str = None):
        """Fetch the run's events of `kind` for the given `names`."""
        return self.client.runs_v1.get_run_events(
            self.namespace,
            self.owner,
            self.project,
            self.run_uuid,
            kind=kind,
            names=names,
            orient=orient,
        )

    @check_no_op
    @check_offline
    def get_multi_runs_events(
        self,
        kind: V1ArtifactKind,
        runs: List[str],
        names: List[str],
        orient: str = None,
    ):
        """Fetch events of `kind` across several runs of the same project."""
        return self.client.runs_v1.get_multi_run_events(
            self.namespace,
            self.owner,
            self.project,
            kind=kind,
            names=names,
            runs=runs,
            orient=orient,
        )

    @check_no_op
    @check_offline
    def get_artifact(self, path: str, stream: bool = True):
        """Return the content of a single run artifact at `path`."""
        return self.client.runs_v1.get_run_artifact(
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            uuid=self.run_uuid,
            path=path,
            stream=stream,
            _preload_content=True,
        )

    @check_no_op
    @check_offline
    def download_artifact(self, path: str):
        """Download a single artifact file to the local store and return its path."""
        url = "{host}/streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/artifact".format(
            host=self.client.config.host,
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            uuid=self.run_uuid,
        )
        return PolyaxonStore(client=self).download_file(url=url, path=path)

    @check_no_op
    @check_offline
    def download_artifacts(
        self,
        path: str = "",
        path_to: str = None,
        untar: bool = True,
        delete_tar: bool = True,
        extract_path: str = None,
    ):
        """Download run artifacts (tarball by default) under `path`."""
        url = "{host}/streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/artifacts".format(
            host=self.client.config.host,
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            uuid=self.run_uuid,
        )
        # NOTE(review): `path_to` is accepted but never forwarded — confirm
        # whether it should be passed to download_file.
        return PolyaxonStore(client=self).download_file(
            url=url,
            path=path,
            untar=untar,
            delete_tar=delete_tar,
            extract_path=extract_path,
        )

    @check_no_op
    @check_offline
    def get_artifacts_tree(self, path: str = ""):
        """Return the artifacts tree listing under `path`."""
        return self.client.runs_v1.get_run_artifacts_tree(
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            uuid=self.run_uuid,
            path=path,
        )

    @check_no_op
    @check_offline
    def stop(self):
        """Request the run to stop."""
        self.client.runs_v1.stop_run(
            self.owner, self.project, self.run_uuid,
        )

    @check_no_op
    @check_offline
    def invalidate(self):
        """Invalidate the run's cache state."""
        self.client.runs_v1.invalidate_run(
            self.owner, self.project, self.run_uuid,
        )

    @check_no_op
    @check_offline
    def restart(self, override_config=None, copy: bool = False, **kwargs):
        """Restart the run; with `copy=True` create a copy instead."""
        body = polyaxon_sdk.V1Run(content=override_config)
        if copy:
            return self.client.runs_v1.copy_run(
                self.owner, self.project, self.run_uuid, body=body, **kwargs
            )
        else:
            return self.client.runs_v1.restart_run(
                self.owner, self.project, self.run_uuid, body=body, **kwargs
            )

    @check_no_op
    @check_offline
    def resume(self, override_config=None, **kwargs):
        """Resume the run, optionally overriding its config."""
        body = polyaxon_sdk.V1Run(content=override_config)
        return self.client.runs_v1.resume_run(
            self.owner, self.project, self.run_uuid, body=body, **kwargs
        )

    @check_no_op
    @check_offline
    def set_description(self, description, async_req=True):
        """Set the run description (patches remote and local cache)."""
        self._update({"description": description}, async_req=async_req)
        self._run_data.description = description

    @check_no_op
    @check_offline
    def set_name(self, name, async_req=True):
        """Set the run name (patches remote and local cache)."""
        self._update({"name": name}, async_req=async_req)
        self._run_data.name = name

    @check_no_op
    @check_offline
    def log_inputs(self, reset=False, async_req=True, **inputs):
        """Log run inputs; merged with existing ones unless `reset` is True."""
        patch_dict = {"inputs": inputs}
        if reset is False:
            patch_dict["merge"] = True
            # Bug fix: when merging, update the local cache instead of
            # replacing it (the two branches were previously swapped).
            self._run_data.inputs = self._run_data.inputs or {}
            self._run_data.inputs.update(inputs)
        else:
            self._run_data.inputs = inputs
        self._update(patch_dict, async_req=async_req)

    @check_no_op
    @check_offline
    def log_outputs(self, reset=False, async_req=True, **outputs):
        """Log run outputs; merged with existing ones unless `reset` is True."""
        patch_dict = {"outputs": outputs}
        if reset is False:
            patch_dict["merge"] = True
            # Bug fix: merge into the *outputs* cache (the original updated
            # `self._run_data.inputs`) and update instead of replacing.
            self._run_data.outputs = self._run_data.outputs or {}
            self._run_data.outputs.update(outputs)
        else:
            self._run_data.outputs = outputs
        self._update(patch_dict, async_req=async_req)

    @check_no_op
    @check_offline
    def log_tags(self, tags, reset=False, async_req=True):
        """Log run tags; merged with existing ones unless `reset` is True."""
        patch_dict = {"tags": validate_tags(tags)}
        if reset is False:
            patch_dict["merge"] = True
        self._update(patch_dict, async_req=async_req)

    @check_no_op
    @check_offline
    def start(self):
        """Mark the run as running."""
        self.log_status(polyaxon_sdk.V1Statuses.RUNNING, "Job is running")
        self._run_data.status = polyaxon_sdk.V1Statuses.RUNNING

    @check_no_op
    @check_offline
    def end(self, status, message=None, traceback=None):
        """Log a final status, unless the run already reached a done state."""
        if self.status in LifeCycle.DONE_VALUES:
            return
        self.log_status(status=status, reason=message, message=traceback)
        self._run_data.status = status
        # Give the async worker a chance to pick up the status message.
        time.sleep(0.1)

    @check_no_op
    @check_offline
    def log_succeeded(self):
        """Mark the run as succeeded."""
        self.end("succeeded", "Job has succeeded")

    @check_no_op
    @check_offline
    def log_stopped(self):
        """Mark the run as stopped."""
        self.end("stopped", "Job is stopped")

    @check_no_op
    @check_offline
    def log_failed(self, message=None, traceback=None):
        """Mark the run as failed with an optional message/traceback."""
        self.end(status="failed", message=message, traceback=traceback)

    @check_no_op
    @check_offline
    def log_code_ref(self):
        """Log the current code reference (commit) as an input artifact."""
        code_ref = get_code_reference()
        if code_ref:
            artifact_run = V1RunArtifact(
                name=code_ref.get("commit"),
                kind=V1ArtifactKind.CODEREF,
                summary=code_ref,
                is_input=True,
            )
            self.log_artifact_lineage(body=artifact_run)

    @check_no_op
    @check_offline
    def log_data_ref(self, name: str, data):
        """Log a hash of `data` as a named input data artifact."""
        if name:
            artifact_run = V1RunArtifact(
                name=name,
                kind=V1ArtifactKind.DATA,
                summary={"hash": hash_value(data)},
                is_input=True,
            )
            self.log_artifact_lineage(body=artifact_run)

    @check_no_op
    @check_offline
    def log_artifact_lineage(self, body: Union[V1RunArtifact, List[V1RunArtifact]]):
        """Record one or more artifact lineage entries for the run."""
        self.client.runs_v1.create_run_artifacts_lineage(
            self.owner, self.project, self.run_uuid, body=body,
        )

    @check_no_op
    @check_offline
    def get_namespace(self):
        """Fetch the Kubernetes namespace of the run from the API."""
        return self.client.runs_v1.get_run_namespace(
            self.owner, self.project, self.run_uuid,
        ).namespace

    @check_no_op
    @check_offline
    def delete(self):
        """Delete the run."""
        return self.client.runs_v1.delete_run(self.owner, self.project, self.run_uuid)

    @check_no_op
    @check_offline
    def list(
        self, query: str = None, sort: str = None, limit: int = None, offset: int = None
    ):
        """List runs in the same project."""
        params = get_query_params(limit=limit, offset=offset, query=query, sort=sort)
        return self.client.runs_v1.list_runs(self.owner, self.project, **params)

    @check_no_op
    @check_offline
    def list_children(
        self, query: str = None, sort: str = None, limit: int = None, offset: int = None
    ):
        """List the runs whose pipeline is this run."""
        params = get_query_params(limit=limit, offset=offset, query=query, sort=sort)
        query = params.get("query")
        # NOTE(review): the "?" branch looks like leftover URL building —
        # confirm the expected query format when no base query is given.
        query = query + "&" if query else "?"
        query += "pipeline={}".format(self.run_uuid)
        params["query"] = query
        return self.client.runs_v1.list_runs(self.owner, self.project, **params)
def get_run_logs(
    client: RunClient,
    hide_time: bool = False,
    all_info: bool = False,
    follow: bool = False,
):
    """Print the logs of a run, optionally following until the run is done."""

    def get_logs(last_file=None, last_time=None):
        # Fetch one page of logs and stream it through the logs handler.
        # In follow mode transient API errors are ignored and retried.
        try:
            response = client.get_logs(last_file=last_file, last_time=last_time)
            get_logs_handler(show_timestamp=not hide_time, all_info=all_info)(response)
            return response
        except (ApiException, HTTPError) as e:
            if not follow:
                handle_cli_error(
                    e,
                    message="Could not get logs for run `{}`.".format(client.run_uuid),
                )
                sys.exit(1)

    def handle_status(last_status: str = None):
        # Echo the (colored) status and return it so the caller can track
        # status changes. NOTE(review): returns a dict when last_status is
        # falsy but a string otherwise; callers only use it for change
        # detection, so this asymmetry is harmless — confirm intent.
        if not last_status:
            return {"status": None}
        click.echo(
            "{}".format(
                Printer.add_status_color({"status": last_status}, status_key="status")[
                    "status"
                ]
            )
        )
        return last_status

    def handle_logs():
        is_done = False
        last_time = None
        last_file = None
        _status = None
        # Wait until the run is running (or already done) before tailing.
        last_status, _ = client.get_statuses()
        while not LifeCycle.is_done(last_status) and not LifeCycle.is_running(
            last_status
        ):
            time.sleep(1)
            last_status, _ = client.get_statuses()
            if _status != last_status:
                _status = handle_status(last_status)
        # Page through logs; in follow mode keep polling until the run is done.
        while not is_done:
            response = get_logs(last_time=last_time, last_file=last_file)
            if response:
                last_time = response.last_time
                last_file = response.last_file
            else:
                last_time = None
                last_file = None
            # Follow logic
            if not any([last_file, last_time]):
                # No more pages: either stop, or keep following the status.
                if follow:
                    last_status, _ = client.get_statuses()
                    if _status != last_status:
                        _status = handle_status(last_status)
                    is_done = LifeCycle.is_done(last_status)
                else:
                    is_done = True
            if last_time and not follow:
                is_done = True
            if not is_done:
                # Poll faster while a log file is actively being produced.
                if last_file:
                    time.sleep(1)
                else:
                    time.sleep(settings.CLIENT_CONFIG.watch_interval)

    handle_logs()
|
gregmbi/polyaxon | core/polyaxon/polyflow/schedule/execute.py | <filename>core/polyaxon/polyflow/schedule/execute.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class ExactTimeScheduleSchema(BaseCamelSchema):
    """Schema for a schedule that triggers once at an exact datetime."""

    kind = fields.Str(allow_none=True, validate=validate.Equal("exact_time"))
    start_at = fields.DateTime(required=True)

    @staticmethod
    def schema_config():
        # Links the schema back to its config class for (de)serialization.
        return V1ExactTimeSchedule
class V1ExactTimeSchedule(BaseConfig, polyaxon_sdk.V1ExactTimeSchedule):
    """Config object for a schedule that triggers once at an exact datetime."""

    SCHEMA = ExactTimeScheduleSchema
    IDENTIFIER = "exact_time"
|
gregmbi/polyaxon | core/polyaxon/polyflow/run/spark/spark.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.k8s import k8s_schemas
from polyaxon.polyflow.run.kinds import V1RunKind
from polyaxon.polyflow.run.spark.replica import SparkReplicaSchema
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.swagger import SwaggerField
class V1SparkType(polyaxon_sdk.V1SparkType):
    """Supported application languages for a Spark run."""

    # Valid values accepted by the schema's ``type`` field.
    VALUES = {
        polyaxon_sdk.V1SparkType.JAVA,
        polyaxon_sdk.V1SparkType.SCALA,
        polyaxon_sdk.V1SparkType.PYTHON,
        polyaxon_sdk.V1SparkType.R,
    }
class V1SparkDeploy(polyaxon_sdk.SparkDeployMode):
    """Supported deploy modes for a Spark run."""

    # Valid values accepted by the schema's ``deployMode`` field.
    VALUES = {
        polyaxon_sdk.SparkDeployMode.CLUSTER,
        polyaxon_sdk.SparkDeployMode.CLIENT,
        polyaxon_sdk.SparkDeployMode.IN_CLUSTER_CLIENT,
    }
class SparkSchema(BaseCamelSchema):
    """Schema for the Spark run kind.

    NOTE(review): ``sparkVersion``/``pythonVersion``/``deployMode`` are
    declared in camelCase while the remaining fields are snake_case — confirm
    how BaseCamelSchema maps these attribute names.
    """

    kind = fields.Str(allow_none=True, validate=validate.Equal(V1RunKind.SPARK))
    connections = fields.List(fields.Str(), allow_none=True)
    volumes = fields.List(SwaggerField(cls=k8s_schemas.V1Volume), allow_none=True)
    type = fields.Str(allow_none=True, validate=validate.OneOf(V1SparkType.VALUES))
    sparkVersion = fields.Str(allow_none=True)
    pythonVersion = fields.Str(allow_none=True)
    # Bug fix: the deploy-mode validator was attached to ``pythonVersion``
    # while ``deployMode`` accepted any string; the validators were swapped.
    deployMode = fields.Str(
        allow_none=True, validate=validate.OneOf(V1SparkDeploy.VALUES)
    )
    main_class = fields.Str(allow_none=True)
    main_application_file = fields.Str(allow_none=True)
    arguments = fields.Str(allow_none=True)
    hadoop_conf = fields.Str(allow_none=True)
    spark_conf = fields.Str(allow_none=True)
    hadoop_config_map = fields.Str(allow_none=True)
    spark_config_map = fields.Str(allow_none=True)
    executor = fields.Nested(SparkReplicaSchema, allow_none=True)
    driver = fields.Nested(SparkReplicaSchema, allow_none=True)

    @staticmethod
    def schema_config():
        # Links the schema back to its config class for (de)serialization.
        return V1Spark
class V1Spark(BaseConfig, polyaxon_sdk.V1Spark):
    """Config object for the Spark run kind."""

    SCHEMA = SparkSchema
    IDENTIFIER = V1RunKind.SPARK
    # Attributes dropped from serialization when unset (API camelCase form).
    REDUCED_ATTRIBUTES = [
        "kind",
        "connections",
        "volumes",
        "type",
        "sparkVersion",
        "pythonVersion",
        "deployMode",
        "mainClass",
        "mainApplicationFile",
        "arguments",
        "hadoopConf",
        "sparkConf",
        "sparkConfigMap",
        "hadoopConfigMap",
        "executor",
        "driver",
    ]
|
gregmbi/polyaxon | core/polyaxon/polyflow/environment/__init__.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields
from polyaxon.k8s import k8s_schemas
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.swagger import SwaggerField
class EnvironmentSchema(BaseCamelSchema):
    """Schema for pod-level Kubernetes environment settings.

    Every field is optional and maps to the equivalent field of a
    Kubernetes PodSpec; k8s object fields are validated via SwaggerField.
    """

    labels = fields.Dict(values=fields.Str(), keys=fields.Str(), allow_none=True)
    annotations = fields.Dict(values=fields.Str(), keys=fields.Str(), allow_none=True)
    node_selector = fields.Dict(values=fields.Str(), keys=fields.Str(), allow_none=True)
    affinity = SwaggerField(cls=k8s_schemas.V1Affinity, allow_none=True)
    tolerations = fields.List(
        SwaggerField(cls=k8s_schemas.V1Toleration), allow_none=True
    )
    node_name = fields.Str(allow_none=True)
    service_account_name = fields.Str(allow_none=True)
    host_aliases = fields.List(
        SwaggerField(cls=k8s_schemas.V1HostAlias), allow_none=True
    )
    security_context = SwaggerField(cls=k8s_schemas.V1SecurityContext, allow_none=True)
    image_pull_secrets = fields.List(fields.Str(), allow_none=True)
    host_network = fields.Bool(allow_none=True)
    dns_policy = fields.Str(allow_none=True)
    dns_config = SwaggerField(cls=k8s_schemas.V1PodDNSConfig, allow_none=True)
    scheduler_name = fields.Str(allow_none=True)
    priority_class_name = fields.Str(allow_none=True)
    priority = fields.Int(allow_none=True)
    restart_policy = fields.Str(allow_none=True)

    @staticmethod
    def schema_config():
        # Config class this schema deserializes into.
        return V1Environment
class V1Environment(BaseConfig, polyaxon_sdk.V1Environment):
    """
    Pod environment config.

    Holds Kubernetes pod-level settings (labels, scheduling, security, DNS);
    validated by EnvironmentSchema.
    """

    IDENTIFIER = "environment"
    SCHEMA = EnvironmentSchema
    # Attributes omitted from serialized output when unset (camelCase keys,
    # matching the serialized form of the SDK model).
    REDUCED_ATTRIBUTES = [
        "labels",
        "annotations",
        "nodeSelector",
        "affinity",
        "tolerations",
        "nodeName",
        "serviceAccountName",
        "hostAliases",
        "securityContext",
        "imagePullSecrets",
        "hostNetwork",
        "dnsPolicy",
        "dnsConfig",
        "schedulerName",
        "priorityClassName",
        "priority",
        "restartPolicy",
    ]
|
gregmbi/polyaxon | core/polyaxon/polypod/sidecar/container.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from polyaxon.containers.containers import V1PolyaxonSidecarContainer
from polyaxon.exceptions import PolypodException
from polyaxon.k8s import k8s_schemas
from polyaxon.polypod.common.env_vars import (
get_connection_env_var,
get_env_from_config_map,
get_env_from_secret,
get_items_from_config_map,
get_items_from_secret,
)
from polyaxon.polypod.common.mounts import (
get_mount_from_resource,
get_mount_from_store,
get_mounts,
)
from polyaxon.polypod.main.container import MAIN_JOB_CONTAINER
from polyaxon.polypod.sidecar.env_vars import get_sidecar_env_vars
from polyaxon.polypod.specs.contexts import PluginsContextsSpec
from polyaxon.schemas.types import V1ConnectionType
from polyaxon.utils.list_utils import to_list
SIDECAR_CONTAINER = "polyaxon-sidecar"
def get_sidecar_args(container_id: str, sleep_interval: int, sync_interval: int) -> str:
    """Build the shell command line that launches the sidecar agent.

    Returns a single space-separated string suitable for `bash -c`.
    """
    parts = [
        "polyaxon sidecar",
        "--container_id={}".format(container_id),
        "--sleep_interval={}".format(sleep_interval),
        "--sync_interval={}".format(sync_interval),
    ]
    return " ".join(parts)
def get_sidecar_container(
    polyaxon_sidecar: V1PolyaxonSidecarContainer,
    env: List[k8s_schemas.V1EnvVar],
    artifacts_store: V1ConnectionType,
    contexts: PluginsContextsSpec,
    run_path: Optional[str],
) -> Optional[k8s_schemas.V1Container]:
    """Build the sidecar container that collects logs/artifacts for a run.

    Returns None when neither log nor artifact collection is enabled.

    Raises:
        PolypodException: if an artifacts store is given without contexts,
            or collection is enabled without a run_path.
    """
    if artifacts_store and not contexts:
        raise PolypodException(
            "Logs/artifacts store was passed and contexts was not passed."
        )
    has_artifacts = artifacts_store and contexts.collect_artifacts
    has_logs = artifacts_store and contexts.collect_logs
    if not has_logs and not has_artifacts:
        # No sidecar
        return None
    if (has_artifacts or has_logs) and not run_path:
        raise PolypodException("Logs store / outputs store must have a run_path.")
    # Base env: point the sidecar at the main job container and the store.
    env = get_sidecar_env_vars(
        env_vars=env,
        job_container_name=MAIN_JOB_CONTAINER,
        artifacts_store_name=artifacts_store.name,
    )
    volume_mounts = get_mounts(
        use_auth_context=contexts.auth,
        use_artifacts_context=has_artifacts,
        use_docker_context=False,
        use_shm_context=False,
    )
    sidecar_args = get_sidecar_args(
        container_id=MAIN_JOB_CONTAINER,
        sleep_interval=polyaxon_sidecar.sleep_interval,
        sync_interval=polyaxon_sidecar.sync_interval,
    )
    env_from = []
    secret = None
    if artifacts_store.is_bucket:
        # Bucket-backed store: wire in its secret and config map as mounts,
        # env items, and env_from sources.
        secret = artifacts_store.get_secret()
        volume_mounts += to_list(
            get_mount_from_resource(resource=secret), check_none=True
        )
        env += to_list(get_items_from_secret(secret=secret), check_none=True)
        env_from += to_list(get_env_from_secret(secret=secret), check_none=True)
        config_map = artifacts_store.get_config_map()
        volume_mounts += to_list(
            get_mount_from_resource(resource=config_map), check_none=True
        )
        env += to_list(
            get_items_from_config_map(config_map=config_map), check_none=True
        )
        env_from += to_list(
            get_env_from_config_map(config_map=config_map), check_none=True
        )
    else:
        # Filesystem/volume-backed store: mount it directly.
        volume_mounts += to_list(
            get_mount_from_store(store=artifacts_store), check_none=True
        )
    env += to_list(
        get_connection_env_var(connection=artifacts_store, secret=secret),
        check_none=True,
    )
    return k8s_schemas.V1Container(
        name=SIDECAR_CONTAINER,
        image=polyaxon_sidecar.get_image(),
        image_pull_policy=polyaxon_sidecar.image_pull_policy,
        command=["/bin/bash", "-c"],
        args=[sidecar_args],
        env=env,
        env_from=env_from,
        resources=polyaxon_sidecar.get_resources(),
        volume_mounts=volume_mounts,
    )
|
gregmbi/polyaxon | core/polyaxon/polyaxonfile/check.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from collections import OrderedDict
from polyaxon.cli.errors import handle_cli_error
from polyaxon.config_reader.spec import ConfigSpec
from polyaxon.polyaxonfile.manager import PolyaxonFile
from polyaxon.polyaxonfile.params import parse_params
from polyaxon.utils import constants
from polyaxon.utils.formatting import Printer, dict_tabulate
from polyaxon.utils.list_utils import to_list
def check_polyaxonfile(
    polyaxonfile=None,
    python_module=None,
    params=None,
    profile=None,
    queue=None,
    nocache=None,
    log=True,
):
    """Load and validate a polyaxonfile (or python module) into an op spec.

    Exits the process with status 1 when no file is found or the spec is
    invalid; returns the parsed specification otherwise.
    """
    # Fall back to the default polyaxonfile in the current directory.
    if not any([polyaxonfile, python_module]):
        polyaxonfile = PolyaxonFile.check_default_path(path=".")
    # Still nothing: use an empty path so the existence check below fails
    # and produces the "not present" error.
    if not any([polyaxonfile, python_module]):
        polyaxonfile = ""
    polyaxonfile = to_list(polyaxonfile)
    exists = [os.path.isfile(f) for f in polyaxonfile]
    parsed_params = None
    if params:
        parsed_params = parse_params(params)
    if not any(exists) and not python_module:
        Printer.print_error(
            "Polyaxonfile is not present, "
            "please run {}".format(constants.INIT_COMMAND)
        )
        sys.exit(1)
    # A python module takes precedence over file-based specs.
    if python_module:
        config = ConfigSpec.get_from(python_module)
        return config.read()
    try:
        plx_file = PolyaxonFile(polyaxonfile)
        plx_file = plx_file.get_op_specification(
            params=parsed_params, profile=profile, queue=queue, nocache=nocache
        )
        if log:
            Printer.print_success("Polyaxonfile valid")
        return plx_file
    except Exception as e:
        # Any parsing/validation failure is reported uniformly and aborts.
        handle_cli_error(e, message="Polyaxonfile is not valid.")
        sys.exit(1)
def check_polyaxonfile_kind(specification, kind):
    """Exit the process when *specification* is not of the expected *kind*."""
    actual_kind = specification.kind
    if actual_kind == kind:
        return
    Printer.print_error(
        "Your polyaxonfile must be of kind: `{}`, "
        "received: `{}`.".format(kind, actual_kind)
    )
    sys.exit(-1)
def get_parallel_info(kind, concurrency, early_stopping=False, **kwargs):
    """Print a summary table for a parallel/search configuration.

    Args:
        kind: the parallel kind identifier (printed lower-cased).
        concurrency: number of concurrent runs; 1 is shown as sequential.
        early_stopping: whether early stopping is activated.
        **kwargs: optional extras; only ``num_runs`` is displayed.

    Returns:
        OrderedDict: the summary that was tabulated (previously the
        function computed it but returned None; returning it is
        backward-compatible and makes the function testable/reusable).
    """
    info = OrderedDict()
    info["Parallel kind"] = kind.lower()
    # "{} runs".format("sequential") was a pointless indirection; the
    # rendered text is identical.
    info["Concurrency"] = (
        "sequential runs"
        if concurrency == 1
        else "{} concurrent runs".format(concurrency)
    )
    info["Early stopping"] = "activated" if early_stopping else "deactivated"
    if "num_runs" in kwargs:
        info["Num of runs to create"] = kwargs["num_runs"]
    dict_tabulate(info)
    return info
|
gregmbi/polyaxon | core/polyaxon/polypod/init/git.py | <filename>core/polyaxon/polypod/init/git.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from polyaxon.containers.containers import V1PolyaxonInitContainer
from polyaxon.containers.contexts import CONTEXT_MOUNT_ARTIFACTS
from polyaxon.containers.names import INIT_GIT_CONTAINER
from polyaxon.exceptions import PolypodException
from polyaxon.k8s import k8s_schemas
from polyaxon.polypod.common import constants
from polyaxon.polypod.common.containers import patch_container
from polyaxon.polypod.common.env_vars import (
get_connection_env_var,
get_env_from_config_map,
get_env_from_secret,
get_items_from_config_map,
get_items_from_secret,
)
from polyaxon.polypod.common.mounts import (
get_auth_context_mount,
get_connections_context_mount,
get_mount_from_resource,
)
from polyaxon.polypod.common.volumes import get_volume_name
from polyaxon.polypod.specs.contexts import PluginsContextsSpec
from polyaxon.schemas.types import V1ConnectionType
from polyaxon.utils.list_utils import to_list
def get_repo_context_args(
    name: str, url: str, revision: str, mount_path: str, connection: str = None
) -> List[str]:
    """Build the initializer CLI arguments for cloning a repo context.

    Raises:
        PolypodException: when *name* or *url* is missing.
    """
    if not name:
        raise PolypodException("A repo name is required to create a repo context.")
    if not url:
        raise PolypodException("A repo url is required to create a repo context.")
    args = ["--repo_path={}/{}".format(mount_path, name), "--url={}".format(url)]
    # Optional flags are appended only when a value is provided.
    optional_flags = [("--revision={}", revision), ("--connection={}", connection)]
    args.extend(template.format(value) for template, value in optional_flags if value)
    return args
def get_git_init_container(
    polyaxon_init: V1PolyaxonInitContainer,
    connection: V1ConnectionType,
    contexts: PluginsContextsSpec,
    container: Optional[k8s_schemas.V1Container] = None,
    env: List[k8s_schemas.V1EnvVar] = None,
    mount_path: str = None,
    track: bool = False,
) -> k8s_schemas.V1Container:
    """Build the init container that clones a git connection into the run context.

    Args:
        polyaxon_init: image/resources config for the init container.
        connection: the git connection to clone (required).
        contexts: plugin contexts; only `auth` is consulted here.
        container: optional base container to patch; created if None.
        env: extra env vars to include.
        mount_path: where to mount the context; defaults to the artifacts context.
        track: when True, pass the connection name to the initializer for tracking.

    Raises:
        PolypodException: when no connection is given.
    """
    if not connection:
        raise PolypodException("A connection is required to create a repo context.")
    if not container:
        container = k8s_schemas.V1Container(
            name=INIT_GIT_CONTAINER.format(connection.name)
        )
    volume_name = (
        get_volume_name(mount_path)
        if mount_path
        else constants.CONTEXT_VOLUME_ARTIFACTS
    )
    mount_path = mount_path or CONTEXT_MOUNT_ARTIFACTS
    volume_mounts = [
        get_connections_context_mount(name=volume_name, mount_path=mount_path)
    ]
    if contexts and contexts.auth:
        volume_mounts.append(get_auth_context_mount(read_only=True))
    env = to_list(env, check_none=True)
    env_from = []
    secret = connection.get_secret()
    if secret:
        volume_mounts += to_list(
            get_mount_from_resource(resource=secret), check_none=True
        )
        env += to_list(get_items_from_secret(secret=secret), check_none=True)
        # Fix: accumulate (+=) instead of assigning, so the secret's env_from
        # is not discarded when a config map is also attached below. This
        # matches the accumulation style used for volume_mounts/env here and
        # in the sidecar container builder.
        env_from += to_list(get_env_from_secret(secret=secret), check_none=True)
    env += to_list(
        get_connection_env_var(connection=connection, secret=secret), check_none=True
    )
    config_map = connection.get_config_map()
    if config_map:
        volume_mounts += to_list(
            get_mount_from_resource(resource=config_map), check_none=True
        )
        env += to_list(
            get_items_from_config_map(config_map=config_map), check_none=True
        )
        # Fix: accumulate for the same reason as above.
        env_from += to_list(
            get_env_from_config_map(config_map=config_map), check_none=True
        )
    args = get_repo_context_args(
        name=connection.name,
        url=connection.schema.url,
        revision=connection.schema.revision,
        mount_path=mount_path,
        connection=connection.name if track else None,
    )
    return patch_container(
        container=container,
        name=INIT_GIT_CONTAINER.format(connection.name),
        image=polyaxon_init.get_image(),
        image_pull_policy=polyaxon_init.image_pull_policy,
        command=["polyaxon", "initializer", "git"],
        args=args,
        env=env,
        env_from=env_from,
        volume_mounts=volume_mounts,
        resources=polyaxon_init.get_resources(),
    )
|
gregmbi/polyaxon | core/polyaxon/sidecar/logging/monitor.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Any, Iterable, Optional
from kubernetes.client.rest import ApiException
from polyaxon.client import RunClient
from polyaxon.exceptions import PolyaxonK8SError
from polyaxon.k8s.manager import K8SManager
from polyaxon.polyboard.logging import V1Log
from polyaxon.utils.tz_utils import now
def query_logs(
    k8s_manager: "K8SManager",
    pod_id: str,
    container_id: str,
    stream: bool = False,
    since_seconds: int = None,
) -> Any:
    """Read (or stream) the logs of a container through the k8s API.

    Args:
        k8s_manager: provides `k8s_api` and `namespace`.
        pod_id: pod to read logs from.
        container_id: container within the pod.
        stream: when True, follow the log stream without preloading content.
        since_seconds: only return logs newer than this many seconds.

    Returns:
        Whatever the k8s client returns (a string, or a stream object when
        streaming).
    """
    # Fix: params were previously *reassigned* when since_seconds was set,
    # silently dropping the follow/_preload_content streaming options when
    # both arguments were provided. Build the params incrementally instead.
    params = {}
    if stream:
        params["follow"] = True
        params["_preload_content"] = False
    if since_seconds:
        params["since_seconds"] = since_seconds
    return k8s_manager.k8s_api.read_namespaced_pod_log(
        pod_id, k8s_manager.namespace, container=container_id, timestamps=True, **params
    )
def process_log_line(log_line: str):
    """Normalize a raw (possibly bytes) log line into a V1Log entry."""
    text = log_line if isinstance(log_line, str) else log_line.decode("utf-8")
    return V1Log.process_log_line(
        value=text.strip(), node=None, pod=None, container=None
    )
def stream_logs(
    k8s_manager: "K8SManager", pod_id: str, container_id: str
) -> Iterable[str]:
    """Yield processed log lines from a followed container log stream.

    Retries the initial stream open up to 3 times on k8s API errors; yields
    an empty string when the stream could not be opened at all.
    """
    raw = None
    retries = 0
    no_logs = True
    while retries < 3 and no_logs:
        try:
            raw = query_logs(
                k8s_manager=k8s_manager,
                pod_id=pod_id,
                container_id=container_id,
                stream=True,
            )
            # Fix: this flag was never cleared on success, so the loop kept
            # re-opening the stream forever (compare process_logs, which
            # clears it). Mark success to exit the retry loop.
            no_logs = False
        except (PolyaxonK8SError, ApiException):
            retries += 1
    if not raw:
        yield ""
    else:
        for log_line in raw.stream():
            if log_line:
                yield process_log_line(log_line=log_line)
def process_logs(
    k8s_manager: K8SManager,
    pod_id: str,
    container_id: str,
    filepath: str,
    since_seconds: int,
) -> bool:
    """Fetch recent container logs and write them, processed, to *filepath*.

    Retries the query up to 3 times on k8s API errors. Returns True when a
    file with at least one processed line was written, False otherwise.
    """
    logs = None
    for _attempt in range(3):
        try:
            logs = query_logs(
                k8s_manager=k8s_manager,
                pod_id=pod_id,
                container_id=container_id,
                since_seconds=since_seconds,
            )
            break
        except (PolyaxonK8SError, ApiException):
            continue
    if not logs:
        return False
    processed = [
        process_log_line(log_line=line) for line in logs.split("\n") if line
    ]
    # Creating the new file
    if not processed:
        return False
    with open(filepath, "w+") as destination:
        destination.write("\n".join(processed))
    return True
def sync_logs(
    k8s_manager: K8SManager,
    client: RunClient,
    last_check: Optional[datetime],
    pod_id: str,
    container_id: str,
    owner: str,
    project: str,
    run_uuid: str,
):
    """Collect logs produced since *last_check* and upload them for the run.

    Returns the new checkpoint datetime when logs were uploaded, otherwise
    the unchanged *last_check*.
    """
    new_check = now()
    since_seconds = None
    if last_check:
        since_seconds = (new_check - last_check).total_seconds()
        # Throttle: skip sync if less than a second has elapsed.
        if since_seconds < 1:
            return last_check
    # The chunk file is named after the checkpoint timestamp and written to
    # the current working directory.
    filepath = str(new_check.timestamp())
    created = process_logs(
        k8s_manager=k8s_manager,
        pod_id=pod_id,
        container_id=container_id,
        since_seconds=since_seconds,
        filepath=filepath,
    )
    if created:
        client.client.upload_run_logs(
            owner, project, run_uuid, uploadfile=filepath, path=filepath
        )
        return new_check
    return last_check
|
gregmbi/polyaxon | core/polyaxon/polyflow/parallel/__init__.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.polyflow.parallel.bayes import (
AcquisitionFunctions,
BayesSchema,
GaussianProcessConfig,
GaussianProcessesKernels,
UtilityFunctionConfig,
V1Bayes,
)
from polyaxon.polyflow.parallel.grid_search import GridSearchSchema, V1GridSearch
from polyaxon.polyflow.parallel.hyperband import HyperbandSchema, V1Hyperband
from polyaxon.polyflow.parallel.hyperopt import HyperoptSchema, V1Hyperopt
from polyaxon.polyflow.parallel.iterative import IterativeSchema, V1Iterative
from polyaxon.polyflow.parallel.mapping import MappingSchema, V1Mapping
from polyaxon.polyflow.parallel.random_search import RandomSearchSchema, V1RandomSearch
from polyaxon.schemas.base import BaseOneOfSchema
class ParallelSchema(BaseOneOfSchema):
    """Polymorphic schema dispatching on the `kind` field to the concrete
    parallel/search algorithm schema."""

    TYPE_FIELD = "kind"
    # Keep `kind` in the deserialized payload.
    TYPE_FIELD_REMOVE = False
    SCHEMAS = {
        V1Mapping.IDENTIFIER: MappingSchema,
        V1GridSearch.IDENTIFIER: GridSearchSchema,
        V1RandomSearch.IDENTIFIER: RandomSearchSchema,
        V1Hyperband.IDENTIFIER: HyperbandSchema,
        V1Bayes.IDENTIFIER: BayesSchema,
        V1Hyperopt.IDENTIFIER: HyperoptSchema,
        V1Iterative.IDENTIFIER: IterativeSchema,
    }
class ParallelMixin(object):
    """Convenience predicates over the configured parallel/search kind.

    Subclasses implement ``get_parallel_kind``; each ``has_*_parallel``
    property checks that kind against one algorithm identifier.
    """

    def get_parallel_kind(self):
        """Return the identifier of the configured parallel kind."""
        raise NotImplementedError

    def _matches_kind(self, identifier):
        # Single comparison point shared by all predicates below.
        return self.get_parallel_kind() == identifier

    @property
    def has_mapping_parallel(self):
        return self._matches_kind(V1Mapping.IDENTIFIER)

    @property
    def has_grid_search_parallel(self):
        return self._matches_kind(V1GridSearch.IDENTIFIER)

    @property
    def has_random_search_parallel(self):
        return self._matches_kind(V1RandomSearch.IDENTIFIER)

    @property
    def has_hyperband_parallel(self):
        return self._matches_kind(V1Hyperband.IDENTIFIER)

    @property
    def has_bo_parallel(self):
        return self._matches_kind(V1Bayes.IDENTIFIER)

    @property
    def has_hyperopt_parallel(self):
        return self._matches_kind(V1Hyperopt.IDENTIFIER)

    @property
    def has_iterative_parallel(self):
        return self._matches_kind(V1Iterative.IDENTIFIER)
|
gregmbi/polyaxon | core/tests/test_polytune/test_random_search.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
from tests.utils import BaseTestCase
from polyaxon.polyflow.parallel import V1RandomSearch
from polyaxon.polytune.search_managers.random_search.manager import RandomSearchManager
@pytest.mark.polytune_mark
class TestRandomSearch(BaseTestCase):
    """Tests for the random-search suggestion manager."""

    def test_random_search_config(self):
        # The manager must be bound to the V1RandomSearch config class.
        assert RandomSearchManager.CONFIG == V1RandomSearch
    def test_get_suggestions(self):
        # 2x2 discrete space (choice x range) caps suggestions at 4,
        # despite numRuns=10.
        config = V1RandomSearch.from_dict(
            {
                "concurrency": 2,
                "numRuns": 10,
                "params": {
                    "feature1": {"kind": "choice", "value": [1, 2]},
                    "feature3": {"kind": "range", "value": [1, 3, 1]},
                },
            }
        )
        assert len(RandomSearchManager(config).get_suggestions()) == 4
        # Same cardinality with a probabilistic choice.
        config = V1RandomSearch.from_dict(
            {
                "concurrency": 2,
                "numRuns": 10,
                "params": {
                    "feature1": {"kind": "pchoice", "value": [(1, 0.1), (2, 0.6)]},
                    "feature3": {"kind": "range", "value": [1, 3, 1]},
                },
            }
        )
        assert len(RandomSearchManager(config).get_suggestions()) == 4
        # Larger space (3 * 5 * 4 combos): numRuns=10 is the binding limit.
        config = V1RandomSearch.from_dict(
            {
                "concurrency": 2,
                "numRuns": 10,
                "params": {
                    "feature1": {"kind": "choice", "value": [1, 2, 3]},
                    "feature2": {"kind": "linspace", "value": [1, 2, 5]},
                    "feature3": {"kind": "range", "value": [1, 5, 1]},
                },
            }
        )
        assert len(RandomSearchManager(config).get_suggestions()) == 10
        # Continuous distributions: the space is unbounded, numRuns applies.
        config = V1RandomSearch.from_dict(
            {
                "concurrency": 2,
                "numRuns": 10,
                "params": {
                    "feature1": {
                        "kind": "pchoice",
                        "value": [(1, 0.3), (2, 0.3), (3, 0.3)],
                    },
                    "feature2": {"kind": "uniform", "value": [0, 1]},
                    "feature3": {"kind": "qlognormal", "value": [0, 0.5, 0.51]},
                },
            }
        )
        assert len(RandomSearchManager(config).get_suggestions()) == 10
    def test_get_suggestions_calls_sample(self):
        # With numRuns=1 the sampler is invoked once per parameter.
        config = V1RandomSearch.from_dict(
            {
                "concurrency": 2,
                "numRuns": 1,
                "params": {
                    "feature1": {"kind": "choice", "value": [1, 2, 3]},
                    "feature2": {"kind": "linspace", "value": [1, 2, 5]},
                    "feature3": {"kind": "range", "value": [1, 5, 1]},
                },
            }
        )
        with patch(
            "polyaxon.polytune.search_managers.random_search.manager.sample"
        ) as sample_mock:
            RandomSearchManager(config).get_suggestions()
        assert sample_mock.call_count == 3
        config = V1RandomSearch.from_dict(
            {
                "concurrency": 2,
                "numRuns": 1,
                "params": {
                    "feature1": {
                        "kind": "pchoice",
                        "value": [(1, 0.3), (2, 0.3), (3, 0.3)],
                    },
                    "feature2": {"kind": "uniform", "value": [0, 1]},
                    "feature3": {"kind": "qlognormal", "value": [0, 0.5, 0.51]},
                    "feature4": {"kind": "range", "value": [1, 5, 1]},
                },
            }
        )
        with patch(
            "polyaxon.polytune.search_managers.random_search.manager.sample"
        ) as sample_mock:
            RandomSearchManager(config).get_suggestions()
        assert sample_mock.call_count == 4
|
gregmbi/polyaxon | core/polyaxon/notifiers/slack_webhook.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from polyaxon.connections.kinds import V1ConnectionKind
from polyaxon.notifiers.keys import INTEGRATIONS_WEBHOOKS_SLACK
from polyaxon.notifiers.spec import NotificationSpec
from polyaxon.notifiers.webhook import WebHookNotifier
from polyaxon.utils.date_utils import to_timestamp
class SlackWebHookNotifier(WebHookNotifier):
    """Posts run status notifications to Slack Incoming Webhooks.

    The notification is rendered as a single Slack "attachment"; the
    optional ``channel`` and ``icon_url`` keys of the connection config
    override the webhook defaults.
    """

    notification_key = V1ConnectionKind.SLACK
    name = "Slack WebHook"
    description = "Slack webhooks to send payload to Slack Incoming Webhooks."
    raise_empty_context = True
    config_key = INTEGRATIONS_WEBHOOKS_SLACK
    validate_keys = ["channel", "icon_url"]

    @classmethod
    def serialize_notification_to_context(cls, notification: NotificationSpec) -> Dict:
        """Map a notification into the Slack attachment fields."""
        logo_url = ""  # TODO: add logo url
        fields = []  # Use build_field
        payload = {
            "fallback": notification.condition.type,
            "title": notification.get_title(),
            "title_link": cls.get_url(),
            "text": notification.get_details(),
            "fields": fields,
            "mrkdwn_in": ["text"],
            "footer_icon": logo_url,
            "footer": "Polyaxon",
            "color": notification.get_color(),
            "ts": to_timestamp(notification.condition.last_transition_time),
        }
        return payload

    @classmethod
    def _prepare(cls, context):
        """Wrap the serialized context into Slack's `attachments` envelope."""
        context = super()._prepare(context)
        data = {
            "fallback": context.get("fallback"),
            "title": context.get("title"),
            "title_link": context.get("title_link"),
            "text": context.get("text"),
            "fields": context.get("fields"),
            "mrkdwn_in": context.get("mrkdwn_in"),
            "footer_icon": context.get("footer_icon"),
            "footer": context.get("footer", "Polyaxon"),
            "color": context.get("color"),
        }
        return {"attachments": [data]}

    @classmethod
    def _pre_execute_web_hook(cls, data, config):
        """Apply per-connection channel/icon overrides before posting."""
        channel = config.get("channel")
        # Fix: this previously read config.get("channel") again (copy/paste
        # slip), so the configured icon_url was never applied even though
        # validate_keys declares it.
        icon_url = config.get("icon_url")
        if channel:
            data["channel"] = channel
        if icon_url:
            data["icon_url"] = icon_url
        return data
|
gregmbi/polyaxon | core/polyaxon/cli/run.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import click
from marshmallow import ValidationError
from polyaxon import settings
from polyaxon.cli.executor import docker_run, k8s_run, platform_run
from polyaxon.env_vars.getters import get_project_or_local
from polyaxon.exceptions import PolyaxonSchemaError
from polyaxon.logger import clean_outputs
from polyaxon.polyaxonfile import (
CompiledOperationSpecification,
OperationSpecification,
check_polyaxonfile,
)
from polyaxon.utils.formatting import Printer
from polyaxon.utils.validation import validate_tags
@click.command()
@click.option("--project", "-p", type=str)
@click.option(
    "-f",
    "--file",
    "polyaxonfile",
    multiple=True,
    type=click.Path(exists=True),
    help="The polyaxonfiles to run.",
)
@click.option(
    "-pm", "--python-module", type=str, help="The python module to run.",
)
@click.option(
    "--name",
    type=str,
    help="Name to give to this run, must be unique within the project, could be none.",
)
@click.option("--tags", type=str, help="Tags of this run, comma separated values.")
@click.option("--description", type=str, help="The description to give to this run.")
@click.option(
    "--upload",
    "-u",
    is_flag=True,
    default=False,
    help="To upload the repo before running.",
)
@click.option(
    "--log",
    "-l",
    is_flag=True,
    default=False,
    help="To start logging after scheduling the run.",
)
@click.option(
    "--watch",
    "-w",
    is_flag=True,
    default=False,
    help="To start statuses watch loop after scheduling the run.",
)
@click.option(
    "--local",
    is_flag=True,
    default=False,
    help="To start the run locally, with `docker` environment as default.",
)
@click.option("--conda_env", type=str, help="To start a local run with `conda`.")
@click.option(
    "--params",
    "-P",
    metavar="NAME=VALUE",
    multiple=True,
    help="A parameter to override the default params of the run, form -P name=value.",
)
@click.option("--profile", type=str, help="Name of the profile to use for this run.")
@click.option("--queue", type=str, help="Name of the queue to use for this run.")
@click.option(
    "--nocache",
    is_flag=True,
    default=None,
    help="Check cache before starting this operation.",
)
@click.pass_context
@clean_outputs
def run(
    ctx,
    project,
    polyaxonfile,
    python_module,
    name,
    tags,
    description,
    upload,
    log,
    watch,
    local,
    conda_env,
    params,
    profile,
    queue,
    nocache,
):
    """Run polyaxonfile specification.
    Examples:
    \b
    ```bash
    $ polyaxon run -f file -f file_override ...
    ```
    Upload before running
    \b
    ```bash
    $ polyaxon run -f file -u
    ```
    Run and set description and tags for this run
    \b
    ```bash
    $ polyaxon run -f file -u --description="Description of the current run" --tags="foo, bar, moo"
    ```
    Run and set a unique name for this run
    \b
    ```bash
    polyaxon run --name=foo
    ```
    Run for a specific project
    \b
    ```bash
    $ polyaxon run -p project1 -f file.yaml
    ```
    Run with updated params
    \b
    ```bash
    $ polyaxon run -p project1 -f file.yaml -P param1=234.2 -P param2=relu
    ```
    """
    # Validate the polyaxonfile (exits the process when invalid).
    op_spec = check_polyaxonfile(
        polyaxonfile=polyaxonfile,
        python_module=python_module,
        params=params,
        profile=profile,
        queue=queue,
        nocache=nocache,
        log=False,
    )
    owner, project_name = get_project_or_local(project, is_cli=True)
    tags = validate_tags(tags)
    # Dispatch to one of three executors: local docker, direct k8s (no API),
    # or the Polyaxon platform.
    if local:
        try:
            # Local runs need a fully compiled operation (context resolved).
            compiled_operation = OperationSpecification.compile_operation(op_spec)
            compiled_operation = CompiledOperationSpecification.apply_context(
                compiled_operation
            )
        except (PolyaxonSchemaError, ValidationError):
            Printer.print_error(
                "Could not run this polyaxonfile locally, "
                "a context is required to resolve it dependencies."
            )
            sys.exit(1)
        # NOTE(review): conda_env is accepted but not forwarded here —
        # presumably docker is the only supported local backend in this
        # code path; confirm intended behavior.
        docker_run(
            ctx=ctx,
            name=name,
            owner=owner,
            project_name=project_name,
            description=description,
            tags=tags,
            compiled_operation=compiled_operation,
            log=log,
        )
    elif settings.CLIENT_CONFIG.no_api:
        # No API configured: schedule straight on Kubernetes.
        k8s_run(
            ctx=ctx,
            name=name,
            owner=owner,
            project_name=project_name,
            description=description,
            tags=tags,
            op_spec=op_spec,
            upload=upload,
            log=log,
            # Uploading requires an explicit project.
            can_upload=all([upload, project]),
        )
    else:
        # Default: schedule through the Polyaxon platform API.
        platform_run(
            ctx=ctx,
            name=name,
            owner=owner,
            project_name=project_name,
            description=description,
            tags=tags,
            op_spec=op_spec,
            upload=upload,
            log=log,
            watch=watch,
            can_upload=all([upload, project]),
        )
|
gregmbi/polyaxon | core/tests/test_managers/test_base.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from mock import patch
from tests.utils import BaseTestCase
from polyaxon.managers.base import BaseConfigManager
@pytest.mark.managers_mark
class TestBaseConfigManger(BaseTestCase):
    """Tests for BaseConfigManager path resolution and init detection."""

    def test_default_props(self):
        # Class defaults before any subclass configuration.
        assert BaseConfigManager.IS_GLOBAL is False
        assert BaseConfigManager.IS_POLYAXON_DIR is False
        assert BaseConfigManager.CONFIG_FILE_NAME is None
        assert BaseConfigManager.CONFIG is None
    @patch("polyaxon.managers.base.os.path.expanduser")
    def test_get_config_filepath(self, expanduser):
        expanduser.return_value = "/tmp/"
        BaseConfigManager.CONFIG_FILE_NAME = "testing"
        # Test configuration
        # Set IS_GLOBAL = True
        BaseConfigManager.IS_GLOBAL = False
        # Set IS_POLYAXON_DIR = True
        BaseConfigManager.IS_POLYAXON_DIR = True
        # Local .polyaxon dir: create=True must create the directory once.
        with patch.object(BaseConfigManager, "_create_dir") as path_fct:
            config_file1 = BaseConfigManager.get_config_filepath(create=True)
        assert path_fct.call_count == 1
        with patch.object(BaseConfigManager, "_create_dir") as path_fct:
            config_file2 = BaseConfigManager.get_config_filepath(create=False)
        assert path_fct.call_count == 0
        assert config_file1 == config_file2
        assert config_file1 == os.path.join(".", ".polyaxon", "testing")
        # Test configuration
        # Set IS_POLYAXON_DIR = True
        BaseConfigManager.IS_POLYAXON_DIR = False
        # Plain local file: no directory creation needed even with create=True.
        with patch.object(BaseConfigManager, "_create_dir") as path_fct:
            config_file1 = BaseConfigManager.get_config_filepath(create=True)
        assert path_fct.call_count == 0
        with patch.object(BaseConfigManager, "_create_dir") as path_fct:
            config_file2 = BaseConfigManager.get_config_filepath(create=False)
        assert path_fct.call_count == 0
        assert config_file1 == config_file2
        assert config_file1 == os.path.join(".", "testing")
        # Test configuration
        # Set IS_GLOBAL = True
        BaseConfigManager.IS_GLOBAL = True
        # Global config: lives under the (mocked) home directory.
        with patch.object(BaseConfigManager, "_create_dir") as path_fct:
            config_file1 = BaseConfigManager.get_config_filepath(create=True)
        assert path_fct.call_count == 1
        with patch.object(BaseConfigManager, "_create_dir") as path_fct:
            config_file2 = BaseConfigManager.get_config_filepath(create=False)
        assert path_fct.call_count == 0
        assert config_file1 == config_file2
        assert config_file1 == os.path.join("/tmp/", ".polyaxon", "testing")
    def test_is_initialized(self):
        # is_initialized resolves the path without creating it, then stats it.
        with patch.object(BaseConfigManager, "get_config_filepath") as path_fct1:
            with patch("polyaxon.managers.base.os.path.isfile") as path_fct2:
                BaseConfigManager.is_initialized()
        assert path_fct1.call_count == 1
        assert path_fct1.call_args_list[0][0] == (False,)
        assert path_fct2.call_count == 1
|
gregmbi/polyaxon | core/polyaxon/polyflow/parallel/bayes.py | <reponame>gregmbi/polyaxon
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import ValidationError, fields, validate, validates_schema
from polyaxon.polyflow.early_stopping import EarlyStoppingSchema
from polyaxon.polyflow.optimization import OptimizationMetricSchema
from polyaxon.polyflow.parallel.matrix import MatrixSchema
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
from polyaxon.schemas.fields.ref_or_obj import RefOrObject
class AcquisitionFunctions(object):
    """Names of the supported bayesian-optimization acquisition functions.

    Every function is recognized in lowercase, uppercase, and capitalized
    spellings (e.g. ``"ucb"``, ``"UCB"``, ``"Ucb"``).
    """

    UCB = "ucb"
    EI = "ei"
    POI = "poi"

    # Accepted spellings per function (lower, upper, capitalized).
    UCB_VALUES = ["ucb", "UCB", "Ucb"]
    EI_VALUES = ["ei", "EI", "Ei"]
    POI_VALUES = ["poi", "POI", "Poi"]

    VALUES = UCB_VALUES + EI_VALUES + POI_VALUES

    @classmethod
    def is_ucb(cls, value):
        """Return True if `value` spells the upper-confidence-bound function."""
        return value in cls.UCB_VALUES

    @classmethod
    def is_ei(cls, value):
        """Return True if `value` spells the expected-improvement function."""
        return value in cls.EI_VALUES

    @classmethod
    def is_poi(cls, value):
        """Return True if `value` spells the probability-of-improvement function."""
        return value in cls.POI_VALUES
class GaussianProcessesKernels(object):
    """Names of the supported gaussian-process kernels.

    Each kernel is recognized in lowercase, uppercase, and capitalized
    spellings (e.g. ``"rbf"``, ``"RBF"``, ``"Rbf"``).
    """

    RBF = "rbf"
    MATERN = "matern"

    RBF_VALUES = [RBF, RBF.upper(), RBF.capitalize()]
    MATERN_VALUES = [MATERN, MATERN.upper(), MATERN.capitalize()]

    VALUES = RBF_VALUES + MATERN_VALUES

    @classmethod
    def is_rbf(cls, value):
        """Return True if `value` spells the RBF kernel."""
        return value in cls.RBF_VALUES

    @classmethod
    def is_matern(cls, value):
        """Return True if `value` spells the Matern kernel."""
        return value in cls.MATERN_VALUES

    # Backward-compatible alias: `is_mattern` (misspelled) is the historical
    # public name; keep it so existing callers continue to work.
    is_mattern = is_matern
class GaussianProcessSchema(BaseCamelSchema):
    """Marshmallow schema for the gaussian-process settings of the bayes search."""

    # Kernel name; restricted to the spellings in GaussianProcessesKernels.VALUES.
    kernel = fields.Str(
        allow_none=True, validate=validate.OneOf(GaussianProcessesKernels.VALUES)
    )
    length_scale = fields.Float(allow_none=True)
    nu = fields.Float(allow_none=True)
    num_restarts_optimizer = fields.Int(allow_none=True)

    @staticmethod
    def schema_config():
        """Return the config class this schema deserializes into."""
        return GaussianProcessConfig
class GaussianProcessConfig(BaseConfig):
    """Config object for gaussian-process settings.

    Defaults: Matern kernel, length_scale=1.0, nu=1.5, no optimizer restarts.
    """

    SCHEMA = GaussianProcessSchema
    IDENTIFIER = "gaussian_process"

    def __init__(
        self,
        kernel=GaussianProcessesKernels.MATERN,
        length_scale=1.0,
        nu=1.5,
        num_restarts_optimizer=0,
    ):
        self.kernel = kernel
        self.length_scale = length_scale
        self.nu = nu
        self.num_restarts_optimizer = num_restarts_optimizer
def validate_utility_function(acquisition_function, kappa, eps):
    """Check that the acquisition function has its required hyperparameter.

    `ucb` requires `kappa`; `ei` and `poi` require `eps`.

    Raises:
        ValidationError: when the required hyperparameter is missing.
    """
    if AcquisitionFunctions.is_ucb(acquisition_function) and kappa is None:
        raise ValidationError(
            "the acquisition function `ucb` requires a parameter `kappa`"
        )

    requires_eps = AcquisitionFunctions.is_ei(
        acquisition_function
    ) or AcquisitionFunctions.is_poi(acquisition_function)
    if requires_eps and eps is None:
        raise ValidationError(
            "the acquisition function `{}` requires a parameter `eps`".format(
                acquisition_function
            )
        )
class UtilityFunctionSchema(BaseCamelSchema):
    """Marshmallow schema for the bayes utility (acquisition) function."""

    # Acquisition function name; restricted to AcquisitionFunctions.VALUES.
    acquisition_function = fields.Str(
        allow_none=True, validate=validate.OneOf(AcquisitionFunctions.VALUES)
    )
    gaussian_process = fields.Nested(GaussianProcessSchema, allow_none=True)
    kappa = fields.Float(allow_none=True)
    eps = fields.Float(allow_none=True)
    num_warmup = fields.Int(allow_none=True)
    num_iterations = fields.Int(allow_none=True)

    @staticmethod
    def schema_config():
        """Return the config class this schema deserializes into."""
        return UtilityFunctionConfig

    @validates_schema
    def validate_utility_function(self, data, **kwargs):
        """Cross-field check: `ucb` needs `kappa`, `ei`/`poi` need `eps`."""
        validate_utility_function(
            acquisition_function=data.get("acquisition_function"),
            kappa=data.get("kappa"),
            eps=data.get("eps"),
        )
class UtilityFunctionConfig(BaseConfig):
    """Config object for the bayes utility (acquisition) function."""

    SCHEMA = UtilityFunctionSchema
    IDENTIFIER = "utility_function"
    # Attributes dropped from serialized output when unset (camelCase keys,
    # matching the camel schema) -- presumably consumed by BaseConfig; confirm.
    REDUCED_ATTRIBUTES = ["numWarmup", "numIterations"]

    def __init__(
        self,
        acquisition_function=AcquisitionFunctions.UCB,
        gaussian_process=None,
        kappa=None,
        eps=None,
        num_warmup=None,
        num_iterations=None,
    ):
        # Fail fast: `ucb` requires kappa, `ei`/`poi` require eps.
        validate_utility_function(
            acquisition_function=acquisition_function, kappa=kappa, eps=eps
        )

        self.acquisition_function = acquisition_function
        self.gaussian_process = gaussian_process
        self.kappa = kappa
        self.eps = eps
        self.num_warmup = num_warmup
        self.num_iterations = num_iterations
def validate_matrix(matrix):
    """Reject matrix entries that bayesian optimization cannot handle.

    Only uniform distributions (or non-distribution values) are allowed.
    Returns the matrix unchanged, or None when it is empty/falsy.

    Raises:
        ValidationError: when an entry defines a non-uniform distribution.
    """
    if not matrix:
        return None

    for name, spec in matrix.items():
        non_uniform = spec.is_distribution and not spec.is_uniform
        if non_uniform:
            raise ValidationError(
                "`{}` defines a non uniform distribution, "
                "and it cannot be used with bayesian optimization.".format(name)
            )

    return matrix
class BayesSchema(BaseCamelSchema):
    """Marshmallow schema for the `bayes` hyperparameter-search section."""

    kind = fields.Str(allow_none=True, validate=validate.Equal("bayes"))
    utility_function = fields.Nested(UtilityFunctionSchema, allow_none=True)
    # RefOrObject: accepts either a literal int or a reference to be resolved.
    num_initial_runs = RefOrObject(fields.Int(), required=True)
    num_iterations = RefOrObject(fields.Int(), required=True)
    metric = fields.Nested(OptimizationMetricSchema, required=True)
    # Search space: param name -> matrix (value space / distribution) spec.
    params = fields.Dict(
        keys=fields.Str(), values=fields.Nested(MatrixSchema), required=True
    )
    seed = RefOrObject(fields.Int(allow_none=True))
    concurrency = fields.Int(allow_none=True)
    early_stopping = fields.Nested(EarlyStoppingSchema, many=True, allow_none=True)

    @staticmethod
    def schema_config():
        """Return the config class this schema deserializes into."""
        return V1Bayes

    @validates_schema
    def validate_matrix(self, data, **kwargs):
        """Validates matrix data and creates the config objects"""
        validate_matrix(data.get("params"))
class V1Bayes(BaseConfig, polyaxon_sdk.V1Bayes):
    """Schema-backed config for the `bayes` search (see BayesSchema)."""

    SCHEMA = BayesSchema
    IDENTIFIER = "bayes"
    # Attributes dropped from serialized output when unset (camelCase keys).
    REDUCED_ATTRIBUTES = ["seed", "concurrency", "earlyStopping"]
|
gregmbi/polyaxon | core/polyaxon/polyaxonfile/specs/compiled_operation.py | <filename>core/polyaxon/polyaxonfile/specs/compiled_operation.py<gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from polyaxon import types
from polyaxon.exceptions import PolyaxonfileError, PolyaxonSchemaError
from polyaxon.polyaxonfile.specs import BaseSpecification, kinds
from polyaxon.polyaxonfile.specs.libs.parser import Parser
from polyaxon.polyflow import ( # noqa
ParamSpec,
V1CompiledOperation,
V1Dag,
V1Init,
V1Param,
V1RunKind,
)
class CompiledOperationSpecification(BaseSpecification):
    """The polyaxonfile specification for compiled operation."""

    _SPEC_KIND = kinds.COMPILED_OPERATION

    CONFIG = V1CompiledOperation

    @classmethod
    def _parse(cls, config, params: Dict[str, ParamSpec]):
        """Template-parse `config` with `params` and re-read it as a config object."""
        params = params or {}
        parsed_data = Parser.parse(config, params)
        return cls.CONFIG.read(parsed_data)

    @staticmethod
    def _update_params_with_contexts(
        params: Dict[str, ParamSpec], contexts: Dict = None
    ) -> Dict[str, ParamSpec]:
        """Merge raw context values into `params` as untyped literal ParamSpecs.

        NOTE: mutates and returns the `params` dict passed in; context keys
        override params with the same name.
        """
        contexts = contexts or {}
        contexts = {
            k: ParamSpec(
                name=k, param=V1Param(value=v), iotype=types.ANY, is_flag=False,
            )
            for k, v in contexts.items()
        }
        params.update(contexts)
        return params

    @classmethod
    def _apply_run_context(cls, config: V1CompiledOperation) -> V1CompiledOperation:
        """Validate params and parse the config; all refs must be resolved by now."""
        param_specs = config.validate_params(is_template=False, check_runs=True)
        for param_spec in param_specs:
            # At this stage every param must be a literal; a leftover ref means
            # an earlier resolution step was skipped.
            if not param_spec.param.is_literal:
                raise PolyaxonfileError(
                    "apply_context received a non-resolved "
                    "ref param `{}` with value `{}`".format(
                        param_spec.name, param_spec.param.to_dict()
                    )
                )
        param_specs = {param_spec.name: param_spec for param_spec in param_specs}
        return cls._parse(config, param_specs)

    @staticmethod
    def _apply_dag_context(config: V1CompiledOperation) -> V1CompiledOperation:
        """Process and validate the DAG run in place, then return the config."""
        dag_run = config.run  # type: V1Dag
        dag_run.process_dag()
        dag_run.validate_dag()
        dag_run.process_components(config.inputs)
        return config

    @classmethod
    def apply_context(cls, config: V1CompiledOperation) -> V1CompiledOperation:
        """Dispatch context application based on the run kind (DAG vs. regular run)."""
        if config.is_dag_run:
            return cls._apply_dag_context(config)
        else:
            return cls._apply_run_context(config)

    @classmethod
    def apply_run_connections_params(
        cls,
        config: V1CompiledOperation,
        artifact_store: str = None,
        contexts: Dict = None,
    ) -> V1CompiledOperation:
        """Resolve params/contexts inside the run's connections and init sections.

        Only applies to job/service runs. Init entries requesting artifacts
        without an explicit connection fall back to `artifact_store`.
        NOTE: mutates `config.run` in place and returns the same config.
        """
        params = config.validate_params(is_template=False, check_runs=True)
        params = {param.name: param for param in params}
        params = cls._update_params_with_contexts(params, contexts)
        if config.run.kind in {V1RunKind.JOB, V1RunKind.SERVICE}:
            if config.run.connections:
                config.run.connections = Parser.parse_section(
                    config.run.connections, params=params, parse_params=True
                )
            if config.run.init:
                init = []
                for i in config.run.init:
                    if i.artifacts and not i.connection:
                        i.connection = artifact_store
                    resolved_i = V1Init.from_dict(
                        Parser.parse_section(
                            i.to_dict(), params=params, parse_params=True
                        )
                    )
                    init.append(resolved_i)
                config.run.init = init
        return config

    @classmethod
    def apply_params(
        cls, config: V1CompiledOperation, params: Dict = None, context: Dict = None,
    ) -> V1CompiledOperation:
        """Delegate param application to the config object itself."""
        config.apply_params(params, context)
        return config

    @classmethod
    def apply_run_contexts(cls, config: V1CompiledOperation, contexts=None):
        """Parse the full run section with params merged with `contexts`.

        Not allowed for pipeline (DAG/matrix) specifications.
        """
        if config.has_pipeline:
            raise PolyaxonSchemaError(
                "This method is not allowed on this specification."
            )
        params = config.validate_params(is_template=False, check_runs=True)
        params = {param.name: param for param in params}
        params = cls._update_params_with_contexts(params, contexts)
        parsed_data = Parser.parse_run(config.to_dict(), params)
        return cls.CONFIG.read(parsed_data)
|
gregmbi/polyaxon | core/polyaxon/settings.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.managers.auth import AuthConfigManager
from polyaxon.managers.client import ClientConfigManager
# Request timeout bounds/intervals; values appear to be seconds -- confirm at
# the call sites that consume them.
MIN_TIMEOUT = 1
LONG_REQUEST_TIMEOUT = 3600
HEALTH_CHECK_INTERVAL = 60

# Scratch location where GCS access credentials can be materialized.
TMP_AUTH_GCS_ACCESS_PATH = "/tmp/.polyaxon/.gcsaccess.json"

# Configurations resolved eagerly from the environment at import time.
AUTH_CONFIG = AuthConfigManager.get_config_from_env()
CLIENT_CONFIG = ClientConfigManager.get_config_from_env()

# Lazily populated: PROXIES_CONFIG via `set_proxies_config`, AGENT_CONFIG below.
PROXIES_CONFIG = None
AGENT_CONFIG = None

if CLIENT_CONFIG.set_agent:
    # Deferred import: the agent manager is only needed when the client is
    # configured to run with an agent.
    from polyaxon.managers.agent import AgentManager

    AGENT_CONFIG = AgentManager.get_config_from_env(agent_path=CLIENT_CONFIG.agent_path)
def set_proxies_config():
    """Load the proxies configuration from the environment into PROXIES_CONFIG."""
    global PROXIES_CONFIG

    # Deferred import keeps the proxies manager out of the default import path.
    from polyaxon.managers.proxies import ProxiesManager

    PROXIES_CONFIG = ProxiesManager.get_config_from_env()
|
gregmbi/polyaxon | core/polyaxon/utils/signal_decorators.py | <reponame>gregmbi/polyaxon<filename>core/polyaxon/utils/signal_decorators.py<gh_stars>0
# pylint:disable=inconsistent-return-statements
class IgnoreRawDecorator(object):
    """Skip signal handling while raw/fixture data is being loaded.

    Usage:

        @receiver(post_save, sender=settings.AUTH_USER_MODEL)
        @ignore_raw
        def my_signal_handler(sender, instance=None, created=False, **kwargs):
            ...
            return ...
    """

    def __init__(self, f):
        # The wrapped signal handler.
        self.f = f

    def __call__(self, *args, **kwargs):
        if not kwargs.get("raw"):
            return self.f(*args, **kwargs)
        # Fixture loading: swallow the signal (implicitly returns None).
class IgnoreUpdatesDecorator(object):
    """Run the wrapped post-save handler only for newly created instances.

    Usage:

        @receiver(post_save, sender=settings.AUTH_USER_MODEL)
        @ignore_updates
        @ignore_raw
        def my_signal_handler(sender, instance=None, created=False, **kwargs):
            ...
            return ...
    """

    def __init__(self, f):
        # The wrapped signal handler.
        self.f = f

    def __call__(self, *args, **kwargs):
        if kwargs.get("created", False):
            return self.f(*args, **kwargs)
        # Update (not a creation): swallow the signal (implicitly returns None).
class IgnoreUpdatesPreDecorator(object):
    """Run the wrapped pre-save handler only when the instance has no pk yet.

    Usage:

        @receiver(post_save, sender=settings.AUTH_USER_MODEL)
        @ignore_updates_pre
        @ignore_raw
        def my_signal_handler(sender, instance=None, created=False, **kwargs):
            ...
            return ...
    """

    def __init__(self, f):
        # The wrapped signal handler.
        self.f = f

    def __call__(self, *args, **kwargs):
        # A truthy pk means the instance already exists, i.e. an update.
        if not kwargs["instance"].pk:
            return self.f(*args, **kwargs)
class CheckSpecificationDecorator(object):
    """Run the wrapped handler only when the instance carries a specification.

    Usage:

        @receiver(post_save, sender=settings.AUTH_USER_MODEL)
        @ignore_updates_pre
        @check_specification
        @ignore_raw
        def my_signal_handler(sender, instance=None, created=False, **kwargs):
            ...
            return ...
    """

    def __init__(self, f):
        # The wrapped signal handler.
        self.f = f

    def __call__(self, *args, **kwargs):
        if kwargs["instance"].specification:
            return self.f(*args, **kwargs)
        # No specification: swallow the signal (implicitly returns None).
# Lowercase decorator aliases -- these are the names applied on signal handlers.
ignore_raw = IgnoreRawDecorator
ignore_updates = IgnoreUpdatesDecorator
ignore_updates_pre = IgnoreUpdatesPreDecorator
check_specification = CheckSpecificationDecorator
|
gregmbi/polyaxon | core/polyaxon/agents/base.py | <reponame>gregmbi/polyaxon<gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, Tuple
import polyaxon_sdk
from kubernetes.client.rest import ApiException
from polyaxon.agents import converter
from polyaxon.agents.spawners.spawner import Spawner
from polyaxon.client import PolyaxonClient
from polyaxon.env_vars.getters import get_run_info
from polyaxon.exceptions import PolypodException
from polyaxon.lifecycle import V1StatusCondition, V1Statuses
from polyaxon.logger import logger
from polyaxon.utils.workers_utils import exit_context, get_pool_workers, get_wait
class BaseAgent:
    """Base implementation of a Polyaxon agent.

    The agent periodically fetches its desired state from the control plane
    (`get_state`, implemented by subclasses) and reconciles it against the
    cluster through a `Spawner`: queued runs are created, stopping runs are
    stopped, and runs marked for apply are patched in place. Outcomes are
    reported back to the API as run status conditions.
    """

    def __init__(self, sleep_interval=None):
        # Fixed polling interval; when None the interval is computed by
        # `get_wait(index)` and grows while nothing interesting happens.
        self.sleep_interval = sleep_interval
        self.spawner = Spawner()
        self.client = PolyaxonClient()
        self._graceful_shutdown = False

    def get_state(self) -> polyaxon_sdk.V1AgentStateResponse:
        """Fetch the agent's desired state. Must be implemented by subclasses."""
        raise NotImplementedError

    def start(self) -> None:
        """Poll and reconcile until the agent is stopped or interrupted."""
        try:
            with exit_context() as exit_event:
                index = 0
                workers = get_pool_workers()

                with ThreadPoolExecutor(workers) as pool:
                    logger.debug("Thread pool Workers: {}".format(workers))
                    timeout = self.sleep_interval or get_wait(index)
                    while not exit_event.wait(timeout=timeout):
                        index += 1
                        agent_state = self.process(pool)
                        if agent_state.status == V1Statuses.STOPPED:
                            self.end()
                            return
                        if agent_state.state.full:
                            # Backlog reported full: restart the wait schedule
                            # so the next poll happens sooner.
                            index = 0
                        timeout = self.sleep_interval or get_wait(index)
                        logger.info("Sleeping for {} seconds".format(timeout))
        finally:
            self.end()

    def end(self):
        """Flag a graceful shutdown (also invoked from `start`'s finally)."""
        self._graceful_shutdown = True
        logger.info("Agent is shutting down.")

    def process(self, pool: "ThreadPoolExecutor") -> polyaxon_sdk.V1AgentStateResponse:
        """Fetch the current state and fan out run actions to the thread pool.

        Never raises: any error is logged and an empty response is returned so
        the polling loop in `start` keeps running.
        """
        try:
            agent_state = self.get_state()
            if agent_state:
                logger.info("Starting runs submission process.")
            else:
                logger.info("No state was found.")
                return polyaxon_sdk.V1AgentStateResponse()

            state = agent_state.state
            for run_data in state.queued or []:
                pool.submit(self.create_run, run_data)
            for run_data in state.stopping or []:
                pool.submit(self.stop_run, run_data)
            for run_data in state.apply or []:
                pool.submit(self.apply_run, run_data)
            return agent_state
        except Exception as exc:
            logger.error(exc)
            return polyaxon_sdk.V1AgentStateResponse()

    def log_run_failed(
        self,
        run_owner: str,
        run_project: str,
        run_uuid: str,
        exc: Exception,
        message: str = None,
    ) -> None:
        """Report a run as failed, attaching the exception and traceback."""
        message = message or "Agent failed deploying run.\n"
        message += "error: {}\n{}".format(repr(exc), traceback.format_exc())
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.FAILED,
            reason="PolyaxonAgentRunActionFailed",
            message=message,
        )
        logger.warning(message)

    def log_run_stopped(self, run_owner: str, run_project: str, run_uuid: str) -> None:
        """Report a run as stopped (used when the resource is already gone)."""
        message = "Run was not found, so we assume it was stopped."
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.STOPPED,
            reason="PolyaxonAgentRunActionStopped",
            message=message,
        )
        logger.warning(message)

    def log_run_scheduled(
        self, run_owner: str, run_project: str, run_uuid: str
    ) -> None:
        """Report a run as scheduled after a successful create."""
        message = "Run was scheduled by the agent."
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.SCHEDULED,
            reason="PolyaxonAgentRunActionScheduled",
            message=message,
        )
        logger.info(message)

    def log_run_running(self, run_owner: str, run_project: str, run_uuid: str) -> None:
        """Report a run as running after a successful apply."""
        message = "Run changes were applied by the agent."
        self.log_run_status(
            run_owner=run_owner,
            run_project=run_project,
            run_uuid=run_uuid,
            status=V1Statuses.RUNNING,
            reason="PolyaxonAgentRunActionRunning",
            message=message,
        )
        logger.info(message)

    def log_run_status(
        self,
        run_owner: str,
        run_project: str,
        run_uuid: str,
        status: str,
        reason: str = None,
        message: str = None,
    ):
        """Asynchronously push a status condition for the run to the API."""
        status_condition = V1StatusCondition.get_condition(
            type=status, status=True, reason=reason, message=message
        )
        self.client.runs_v1.create_run_status(
            owner=run_owner,
            project=run_project,
            uuid=run_uuid,
            body={"condition": status_condition},
            async_req=True,
        )

    def clean_run(self, run_uuid: str, run_kind: str):
        """Best-effort removal of a run's resources; never raises."""
        try:
            self.spawner.stop(run_uuid=run_uuid, run_kind=run_kind)
        except ApiException as e:
            if e.status == 404:
                logger.debug("Run does not exist.")
        except Exception as e:
            logger.debug(
                "Run could not be cleaned: {}\n{}".format(
                    repr(e), traceback.format_exc()
                )
            )

    def prepare_run_resource(
        self,
        owner_name: str,
        project_name: str,
        run_name: str,
        run_uuid: str,
        content: str,
    ) -> Dict:
        """Convert run content into a k8s resource dict.

        On conversion failure the run is marked failed and None is returned;
        callers must check for None before submitting the resource.
        """
        try:
            return converter.convert(
                owner_name=owner_name,
                project_name=project_name,
                run_name=run_name,
                run_uuid=run_uuid,
                content=content,
            )
        except PolypodException as e:
            self.log_run_failed(
                run_owner=owner_name,
                run_project=project_name,
                run_uuid=run_uuid,
                exc=e,
                message="Agent failed converting run manifest.\n",
            )
        except Exception as e:
            self.log_run_failed(
                run_owner=owner_name,
                run_project=project_name,
                run_uuid=run_uuid,
                exc=e,
                message="Agent failed during compilation with unknown exception.\n",
            )

    def create_run(self, run_data: Tuple[str, str, str, str]):
        """Create a queued run.

        `run_data` is (run_instance, run_kind, run_name, content) -- derived
        from how the tuple entries are used below.
        """
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        resource = self.prepare_run_resource(
            owner_name=run_owner,
            project_name=run_project,
            run_name=run_data[2],
            run_uuid=run_uuid,
            content=run_data[3],
        )
        if resource is None:
            # Conversion failed and was already reported by prepare_run_resource.
            return
        try:
            self.spawner.create(
                run_uuid=run_uuid, run_kind=run_data[1], resource=resource
            )
            self.log_run_scheduled(
                run_owner=run_owner, run_project=run_project, run_uuid=run_uuid
            )
        except ApiException as e:
            if e.status == 409:
                logger.info(
                    "Run already running, triggering an apply mechanism."
                )
                self.apply_run(run_data=run_data)
            else:
                # Previously swallowed silently; surface other API errors.
                self.log_run_failed(
                    run_owner=run_owner,
                    run_project=run_project,
                    run_uuid=run_uuid,
                    exc=e,
                )
        except Exception as e:
            self.log_run_failed(
                run_owner=run_owner, run_project=run_project, run_uuid=run_uuid, exc=e
            )

    def apply_run(self, run_data: Tuple[str, str, str, str]):
        """Apply (patch) an existing run; clean up its resources on failure."""
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        resource = self.prepare_run_resource(
            owner_name=run_owner,
            project_name=run_project,
            run_name=run_data[2],
            run_uuid=run_uuid,
            content=run_data[3],
        )
        if resource is None:
            # Conversion failed and was already reported by prepare_run_resource.
            return
        try:
            self.spawner.apply(
                run_uuid=run_uuid, run_kind=run_data[1], resource=resource
            )
            self.log_run_running(
                run_owner=run_owner, run_project=run_project, run_uuid=run_uuid
            )
        except Exception as e:
            self.log_run_failed(
                run_owner=run_owner, run_project=run_project, run_uuid=run_uuid, exc=e
            )
            self.clean_run(run_uuid=run_uuid, run_kind=run_data[1])

    def stop_run(self, run_data: Tuple[str, str]):
        """Stop a run; a missing resource (404) is treated as already stopped."""
        run_owner, run_project, run_uuid = get_run_info(run_instance=run_data[0])
        try:
            self.spawner.stop(run_uuid=run_uuid, run_kind=run_data[1])
        except ApiException as e:
            if e.status == 404:
                logger.info("Run does not exist anymore, it could have been stopped.")
                self.log_run_stopped(
                    run_owner=run_owner, run_project=run_project, run_uuid=run_uuid
                )
            else:
                # Previously swallowed silently; surface other API errors.
                self.log_run_failed(
                    run_owner=run_owner,
                    run_project=run_project,
                    run_uuid=run_uuid,
                    exc=e,
                    message="Agent failed stopping run.\n",
                )
        except Exception as e:
            self.log_run_failed(
                run_owner=run_owner,
                run_project=run_project,
                run_uuid=run_uuid,
                exc=e,
                message="Agent failed stopping run.\n",
            )
|
gregmbi/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/models/v1_run_profile.py | #!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.79
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1RunProfile(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type (used by to_dict/serializers).
    openapi_types = {
        "uuid": "str",
        "name": "str",
        "description": "str",
        "tags": "list[str]",
        "created_at": "datetime",
        "updated_at": "datetime",
        "frozen": "bool",
        "disabled": "bool",
        "agent": "str",
        "queue": "str",
        "namespace": "str",
        "termination": "object",
        "environment": "object",
    }

    # Maps python attribute name -> JSON key in the API payload.
    attribute_map = {
        "uuid": "uuid",
        "name": "name",
        "description": "description",
        "tags": "tags",
        "created_at": "created_at",
        "updated_at": "updated_at",
        "frozen": "frozen",
        "disabled": "disabled",
        "agent": "agent",
        "queue": "queue",
        "namespace": "namespace",
        "termination": "termination",
        "environment": "environment",
    }

    def __init__(
        self,
        uuid=None,
        name=None,
        description=None,
        tags=None,
        created_at=None,
        updated_at=None,
        frozen=None,
        disabled=None,
        agent=None,
        queue=None,
        namespace=None,
        termination=None,
        environment=None,
        local_vars_configuration=None,
    ):  # noqa: E501
        """V1RunProfile - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._uuid = None
        self._name = None
        self._description = None
        self._tags = None
        self._created_at = None
        self._updated_at = None
        self._frozen = None
        self._disabled = None
        self._agent = None
        self._queue = None
        self._namespace = None
        self._termination = None
        self._environment = None
        self.discriminator = None

        # Only explicitly provided values are set, so unset attributes stay None.
        if uuid is not None:
            self.uuid = uuid
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        if tags is not None:
            self.tags = tags
        if created_at is not None:
            self.created_at = created_at
        if updated_at is not None:
            self.updated_at = updated_at
        if frozen is not None:
            self.frozen = frozen
        if disabled is not None:
            self.disabled = disabled
        if agent is not None:
            self.agent = agent
        if queue is not None:
            self.queue = queue
        if namespace is not None:
            self.namespace = namespace
        if termination is not None:
            self.termination = termination
        if environment is not None:
            self.environment = environment

    @property
    def uuid(self):
        """Gets the uuid of this V1RunProfile.  # noqa: E501

        :return: The uuid of this V1RunProfile.  # noqa: E501
        :rtype: str
        """
        return self._uuid

    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this V1RunProfile.

        :param uuid: The uuid of this V1RunProfile.  # noqa: E501
        :type: str
        """
        self._uuid = uuid

    @property
    def name(self):
        """Gets the name of this V1RunProfile.  # noqa: E501

        :return: The name of this V1RunProfile.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1RunProfile.

        :param name: The name of this V1RunProfile.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def description(self):
        """Gets the description of this V1RunProfile.  # noqa: E501

        :return: The description of this V1RunProfile.  # noqa: E501
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this V1RunProfile.

        :param description: The description of this V1RunProfile.  # noqa: E501
        :type: str
        """
        self._description = description

    @property
    def tags(self):
        """Gets the tags of this V1RunProfile.  # noqa: E501

        :return: The tags of this V1RunProfile.  # noqa: E501
        :rtype: list[str]
        """
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Sets the tags of this V1RunProfile.

        :param tags: The tags of this V1RunProfile.  # noqa: E501
        :type: list[str]
        """
        self._tags = tags

    @property
    def created_at(self):
        """Gets the created_at of this V1RunProfile.  # noqa: E501

        :return: The created_at of this V1RunProfile.  # noqa: E501
        :rtype: datetime
        """
        return self._created_at

    @created_at.setter
    def created_at(self, created_at):
        """Sets the created_at of this V1RunProfile.

        :param created_at: The created_at of this V1RunProfile.  # noqa: E501
        :type: datetime
        """
        self._created_at = created_at

    @property
    def updated_at(self):
        """Gets the updated_at of this V1RunProfile.  # noqa: E501

        :return: The updated_at of this V1RunProfile.  # noqa: E501
        :rtype: datetime
        """
        return self._updated_at

    @updated_at.setter
    def updated_at(self, updated_at):
        """Sets the updated_at of this V1RunProfile.

        :param updated_at: The updated_at of this V1RunProfile.  # noqa: E501
        :type: datetime
        """
        self._updated_at = updated_at

    @property
    def frozen(self):
        """Gets the frozen of this V1RunProfile.  # noqa: E501

        :return: The frozen of this V1RunProfile.  # noqa: E501
        :rtype: bool
        """
        return self._frozen

    @frozen.setter
    def frozen(self, frozen):
        """Sets the frozen of this V1RunProfile.

        :param frozen: The frozen of this V1RunProfile.  # noqa: E501
        :type: bool
        """
        self._frozen = frozen

    @property
    def disabled(self):
        """Gets the disabled of this V1RunProfile.  # noqa: E501

        :return: The disabled of this V1RunProfile.  # noqa: E501
        :rtype: bool
        """
        return self._disabled

    @disabled.setter
    def disabled(self, disabled):
        """Sets the disabled of this V1RunProfile.

        :param disabled: The disabled of this V1RunProfile.  # noqa: E501
        :type: bool
        """
        self._disabled = disabled

    @property
    def agent(self):
        """Gets the agent of this V1RunProfile.  # noqa: E501

        :return: The agent of this V1RunProfile.  # noqa: E501
        :rtype: str
        """
        return self._agent

    @agent.setter
    def agent(self, agent):
        """Sets the agent of this V1RunProfile.

        :param agent: The agent of this V1RunProfile.  # noqa: E501
        :type: str
        """
        self._agent = agent

    @property
    def queue(self):
        """Gets the queue of this V1RunProfile.  # noqa: E501

        :return: The queue of this V1RunProfile.  # noqa: E501
        :rtype: str
        """
        return self._queue

    @queue.setter
    def queue(self, queue):
        """Sets the queue of this V1RunProfile.

        :param queue: The queue of this V1RunProfile.  # noqa: E501
        :type: str
        """
        self._queue = queue

    @property
    def namespace(self):
        """Gets the namespace of this V1RunProfile.  # noqa: E501

        :return: The namespace of this V1RunProfile.  # noqa: E501
        :rtype: str
        """
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        """Sets the namespace of this V1RunProfile.

        :param namespace: The namespace of this V1RunProfile.  # noqa: E501
        :type: str
        """
        self._namespace = namespace

    @property
    def termination(self):
        """Gets the termination of this V1RunProfile.  # noqa: E501

        :return: The termination of this V1RunProfile.  # noqa: E501
        :rtype: object
        """
        return self._termination

    @termination.setter
    def termination(self, termination):
        """Sets the termination of this V1RunProfile.

        :param termination: The termination of this V1RunProfile.  # noqa: E501
        :type: object
        """
        self._termination = termination

    @property
    def environment(self):
        """Gets the environment of this V1RunProfile.  # noqa: E501

        :return: The environment of this V1RunProfile.  # noqa: E501
        :rtype: object
        """
        return self._environment

    @environment.setter
    def environment(self, environment):
        """Sets the environment of this V1RunProfile.

        :param environment: The environment of this V1RunProfile.  # noqa: E501
        :type: object
        """
        self._environment = environment

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists of models, and dict values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
                )
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict")
                        else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1RunProfile):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1RunProfile):
            return True

        return self.to_dict() != other.to_dict()
|
gregmbi/polyaxon | core/polyaxon/k8s/custom_resources/operation.py | <reponame>gregmbi/polyaxon<filename>core/polyaxon/k8s/custom_resources/operation.py
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from polyaxon.k8s.custom_resources.crd import get_custom_object
# CRD coordinates for the Polyaxon Operation custom resource; the apiVersion
# is assembled as "<GROUP>/<API_VERSION>" in get_operation_custom_object.
KIND = "Operation"
PLURAL = "operations"
API_VERSION = "v1"
GROUP = "core.polyaxon.com"
def get_operation_custom_object(
    resource_name: str, namespace: str, custom_object: Dict, labels: Dict[str, str]
) -> Dict:
    """Build the custom-object payload for a Polyaxon Operation resource."""
    api_version = "{}/{}".format(GROUP, API_VERSION)
    return get_custom_object(
        resource_name=resource_name,
        namespace=namespace,
        kind=KIND,
        api_version=api_version,
        labels=labels,
        custom_object=custom_object,
    )
def get_run_instance(owner: str, project: str, run_uuid: str) -> str:
    """Return the run instance id: `<owner>.<project>.runs.<run_uuid>`."""
    return ".".join([owner, project, "runs", run_uuid])
def get_notifier_instance(owner: str, project: str, run_uuid: str) -> str:
    """Return the fully-qualified notifier instance id: "<owner>.<project>.notifiers.<uuid>"."""
    return f"{owner}.{project}.notifiers.{run_uuid}"
def get_resource_name(run_uuid: str) -> str:
    """Return the k8s resource name for an operation run: "plx-operation-<uuid>"."""
    return f"plx-operation-{run_uuid}"
def get_notifier_resource_name(run_uuid: str) -> str:
    """Return the k8s resource name for a notifier: "plx-notifier-<uuid>"."""
    return f"plx-notifier-{run_uuid}"
|
jsbankole/kairos-fhir-dsl-mapping-example | src/main/groovy/projects/gecco/crf/GroovyGeneratorFromTemplate/Diseases/generateGroovyFromTemplate.py | import pandas as pd
import numpy as np
import os

# One entry per disease area:
#   (groovy template file, excel file with the per-disease values,
#    prefix for the generated groovy file names)
# NOTE: "conditionLungDiseases_" is plural while the others are singular;
# kept verbatim so the generated file names do not change.
DISEASE_AREAS = [
    ("template_CardiovascularDiseases", "values_CardiovascularDiseases.xlsx",
     "conditionCardiovascularDisease_"),
    ("template_LiverDiseases", "values_LiverDiseases.xlsx",
     "conditionLiverDisease_"),
    ("template_LungDiseases", "values_LungDiseases.xlsx",
     "conditionLungDiseases_"),
    ("template_NeuroDiseases", "values_NeuroDiseases.xlsx",
     "conditionNeuroDisease_"),
    ("template_RheumaImunoDiseases", "values_RheumaImunoDiseases.xlsx",
     "conditionRheumaImunoDisease_"),
]

# Auxiliary file collecting an excerpt for ExportResourceMappingConfig;
# start fresh on every run so entries are not duplicated.
aux_file_name = "aux_ExportResourceMappingConfig.txt"
if os.path.exists(aux_file_name):
    os.remove(aux_file_name)

# Placeholder fields replaced in the template (column names in the excel
# sheets).  Hoisted out of the loop: identical for every disease area.
available_fields = ["ParameterCodeDisease", "IdComplement", "ICDCode",
                    "DiseaseName-EN", "SnomedCode"]

# Iterate over each disease area.
for template_file, values_file, file_name_root in DISEASE_AREAS:
    # Load the groovy template for this area.
    with open(template_file, "r") as f:
        templateString = f.read()

    # Load the per-disease values; empty cells become empty strings so they
    # substitute cleanly into the template.
    values_df = pd.read_excel(values_file).replace(np.nan, "")

    # One generated groovy file per disease (one excel row per disease).
    for _, row in values_df.iterrows():
        updated_template = templateString
        for field_name in available_fields:
            updated_template = updated_template.replace(
                f"##{field_name}##", str(row[field_name])
            )

        # Save the newly generated groovy file.
        new_file_name = file_name_root + row["IdComplement"].lower()
        with open(new_file_name + ".groovy", "w") as f:
            f.write(updated_template)

        # Record the new file in the ExportResourceMappingConfig excerpt.
        with open(aux_file_name, "a") as f:
            append_str = f"""{{
"selectFromCxxEntity": "STUDY_VISIT_ITEM",
"transformByTemplate": "{new_file_name}",
"exportToFhirResource": "Condition"
}},\n"""
            f.write(append_str)
|
jsbankole/kairos-fhir-dsl-mapping-example | src/main/groovy/projects/gecco/crf/GroovyGeneratorFromTemplate/History of Travel/generateHistoryTravel.py | <reponame>jsbankole/kairos-fhir-dsl-mapping-example
import os

# Number of history-of-travel resources to generate.
nb_iterations = 2

# Path of the groovy template file.
template = "historyTravelTemplate"

# Auxiliary file collecting an excerpt for ExportResourceMappingConfig;
# start fresh on every run so entries are not duplicated.
aux_file_name = "aux_ExportResourceMappingConfig.txt"
if os.path.exists(aux_file_name):
    os.remove(aux_file_name)

# Load the template once; it is reused for every iteration.
with open(template, "r") as f:
    templateString = f.read()

# Generate one groovy file per iteration.
for i in range(nb_iterations):
    # "##iter##" is a literal placeholder — the original used a pointless
    # f-string prefix here with nothing to interpolate.
    updated_template = templateString.replace("##iter##", str(i))

    # Save the newly generated groovy file.
    new_file_name = "observationHistoryOfTravel_" + str(i)
    with open(new_file_name + ".groovy", "w") as f:
        f.write(updated_template)

    # Record the new file in the ExportResourceMappingConfig excerpt.
    with open(aux_file_name, "a") as f:
        append_str = f"""{{
"selectFromCxxEntity": "STUDY_VISIT_ITEM",
"transformByTemplate": "observationHistoryOfTravel_{i}",
"exportToFhirResource": "Observation"
}},\n"""
        f.write(append_str)
|
kylebystrom/pawpyseed | pawpyseed/core/noncollinear.py | <filename>pawpyseed/core/noncollinear.py
from pawpyseed.core.wavefunction import *
class NCLWavefunction(pawpyc.CNCLWavefunction, Wavefunction):
    """
    Noncollinear (spinor) analogue of Wavefunction. Each band carries an
    up and a down spinor component, so realspace routines return and write
    two components per state.
    """

    def __init__(self, struct, pwf, cr, dim, symprec=1e-4, setup_projectors=False):
        """
        Arguments:
            struct (pymatgen.core.Structure): structure that the wavefunction describes
            pwf (pawpyc.PWFPointer): holder class for pswf_t and k-points/k-point weights
            cr (CoreRegion): Contains the pseudopotentials, with projectors and
                partials waves, for the structure
            dim (pymatgen.io.vasp.outputs.Outcar OR np.ndarry OR list of length 3):
                Outcar object for reading ngf or the dimensions NG* of the FFT grid
            symprec (float, 1e-4): precision tolerance for symmetry operations
            setup_projectors (bool, False): Whether to set up the core region
                components of the wavefunctions. Pawpyseed will set up the projectors
                automatically when they are first needed, so this generally
                can be left as False.

        Returns:
            NCLWavefunction object

        Raises:
            PAWpyError: if the pseudowavefunction is collinear.
        """
        self.band_props = pwf.band_props.copy(order="C")
        # NOTE(review): super(Wavefunction, self) deliberately skips
        # Wavefunction.__init__ in the MRO and reaches the C base class
        # initializer directly — confirm this is intentional.
        super(Wavefunction, self).__init__(pwf)
        if not self.ncl:
            raise PAWpyError(
                "Pseudowavefunction is collinear! Call Wavefunction(...) instead"
            )
        self.structure = struct
        self.cr = cr
        self.dim = np.array(dim).astype(np.int32)
        if setup_projectors:
            self.check_c_projectors()

    @staticmethod
    def from_files(
        struct="CONTCAR",
        wavecar="WAVECAR",
        cr="POTCAR",
        vr="vasprun.xml",
        setup_projectors=False,
    ):
        """
        Construct an NCLWavefunction object from file paths.

        Arguments:
            struct (str): VASP POSCAR or CONTCAR file path
            wavecar (str): VASP WAVECAR file path
            cr (str): VASP POTCAR file path
            vr (str): VASP vasprun file path
            setup_projectors (bool, False): Whether to set up the core region
                components of the wavefunctions. Pawpyseed will set up the projectors
                automatically when they are first needed, so this generally
                can be left as False.

        Returns:
            NCLWavefunction object
        """
        vr = Vasprun(vr)
        # FFT grid dimensions and symmetry precision as recorded by VASP.
        dim = np.array(
            [vr.parameters["NGX"], vr.parameters["NGY"], vr.parameters["NGZ"]]
        )
        symprec = vr.parameters["SYMPREC"]
        pwf = pawpyc.PWFPointer(wavecar, vr)
        return NCLWavefunction(
            Poscar.from_file(struct).structure,
            pwf,
            CoreRegion(Potcar.from_file(cr)),
            dim,
            symprec,
            setup_projectors,
        )

    @staticmethod
    def from_directory(path, setup_projectors=False):
        """
        Assumes VASP output has the default filenames and is located
        in the directory specificed by path.

        Arguments:
            path (str): VASP output directory
            setup_projectors (bool, False): Whether to set up the core region
                components of the wavefunctions. Pawpyseed will set up the projectors
                automatically when they are first needed, so this generally
                can be left as False.

        Returns:
            NCLWavefunction object
        """
        filepaths = []
        for d in ["CONTCAR", "WAVECAR", "POTCAR", "vasprun.xml"]:
            filepaths.append(str(os.path.join(path, d)))
        args = filepaths + [setup_projectors]
        return NCLWavefunction.from_files(*args)

    def desymmetrized_copy(self, allkpts=None, weights=None):
        # Desymmetrization is not implemented for noncollinear wavefunctions.
        raise NotImplementedError()

    def write_state_realspace(
        self, b, k, s, fileprefix="", dim=None, scale=1, remove_phase=False
    ):
        """
        Writes the real and imaginary parts of each spinor component of a
        given band to four files (UP/DOWN x REAL/IMAG), prefixed by fileprefix.

        Args:
            b (int): band number (0-indexed!)
            k (int): kpoint number (0-indexed!)
            s (int): spin number (0-indexed!)
            fileprefix (string, ""): first part of the file name
            dim (numpy array of 3 ints, None): dimensions of the FFT grid
            scale (scalar, 1): number to multiply the realspace wavefunction by.
                For example, VASP multiplies charge density by the volume
                of the structure.
            remove_phase (False): If True, removes the e^(ikr) phase
                from the wavefunction (this does not necessarily mean
                the wavefunction is real). This is useful if you want
                to visualize the wavefunction because the e^(ikr) phase
                makes the wavefunction non-periodic

        Returns:
            Two 3D arrays (up and down spinor components, indexed by x,y,z
            where x,y,z are fractional coordinates) with complex double
            values for the realspace wavefunction.
            The wavefunction is written in four files with z the slow index.
        """
        self.check_c_projectors()
        if dim is not None:
            self.update_dim(np.array(dim))
        filename_base = "%sB%dK%dS%d" % (fileprefix, b, k, s)
        filename1 = "%s_UP_REAL.vasp" % filename_base
        filename2 = "%s_UP_IMAG.vasp" % filename_base
        filename3 = "%s_DOWN_REAL.vasp" % filename_base
        filename4 = "%s_DOWN_IMAG.vasp" % filename_base
        res0, res1 = self._write_realspace_state(
            filename1,
            filename2,
            filename3,
            filename4,
            scale,
            b,
            k,
            s,
            remove_phase=remove_phase,
        )
        # Prepend the VASP volumetric-file header to each output file.
        self._convert_to_vasp_volumetric(filename1, self.dim)
        self._convert_to_vasp_volumetric(filename2, self.dim)
        self._convert_to_vasp_volumetric(filename3, self.dim)
        self._convert_to_vasp_volumetric(filename4, self.dim)
        return res0, res1

    def write_density_realspace(self, filename="PYAECCAR.vasp", dim=None, scale=1):
        """
        Writes the all-electron charge density to a VASP-style volumetric file.

        Args:
            filename (string, "PYAECCAR.vasp"): output file name
            dim (numpy array of 3 ints, None): dimensions of the FFT grid
            scale (scalar, 1): number to multiply the realspace density by.
                For example, VASP multiplies charge density by the volume
                of the structure.

        Returns:
            A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
            with the realspace charge density.
            The charge density is written with z the slow index.
        """
        self.check_c_projectors()
        if dim is not None:
            self.update_dim(np.array(dim))
        res = self._write_realspace_density(filename, scale)
        self._convert_to_vasp_volumetric(filename, self.dim)
        return res
|
kylebystrom/pawpyseed | parse_headers.py | <reponame>kylebystrom/pawpyseed
import os
import re
# Root of the C sources whose headers are converted to Cython .pxd externs.
directory = "pawpyseed/core/"
def write_pxd(pxdname, files):
    """Generate a Cython .pxd extern file from a list of C headers.

    For each header name in ``files`` (relative to ``directory``, without
    the ".h" extension), strips comments, rewrites typedefs/structs into
    Cython ``ctypedef`` syntax, and appends a ``cdef extern from "X.h"``
    section to the output file ``pxdname``.
    """
    full_file = ""
    # full_file += '# cython : profile=True\n'
    full_file += "# cython : language_level=3\n"
    full_file += "from libc.stdio cimport FILE\n"
    for fname in files:
        f = open(directory + fname + ".h")
        code = f.read()
        # Strip /* ... */ block comments, collapse spaces after semicolons,
        # and drop // and # line comments.
        code = re.sub("/\\*([^*]|[\r\n]|(\\*+([^*/]|[\r\n])))*\\*+/", "", code)
        code = re.sub("; +", ";", code)
        code = re.sub("(//|#)[^\n]+\n", "", code)
        # Open braces become ":;" so that splitting on ";" keeps struct
        # headers as their own entries; typedef -> ctypedef for Cython.
        code = code.replace("{", ":;").replace("typedef", "ctypedef")
        f.close()
        code_lines = code.split(";")
        code_lines = [c.split("*/")[-1] + "\n" for c in code_lines]
        i = 0
        full_file += '\n\ncdef extern from "%s.h":\n\n\t' % fname
        while i < len(code_lines):
            inc = True
            # Trim leading blank lines from the current entry.
            while code_lines[i].startswith("\n"):
                code_lines[i] = code_lines[i][1:]
            if "ctypedef" in code_lines[i]:
                # Remember where the current typedef started; the closing
                # "}" entry below rewrites that line with the struct name.
                # NOTE(review): assumes every "}" is preceded by a
                # "ctypedef" entry, otherwise linenum is unbound — confirm
                # the headers always follow this shape.
                linenum = i
            """
            if '//' in code_lines[i]:
                inc = False
                code_lines[i] = code_lines[i].split('\n', 1)
                if len(code_lines[i]) == 1:
                    code_lines.pop(i)
                else:
                    code_lines[i] = code_lines[i][1]
            """
            if "}" in code_lines[i]:
                inc = False
                # Struct name follows the closing brace; rewrite the opening
                # typedef line as "ctypedef struct <name>:".
                mystr = code_lines[i].split("}")[-1].replace("\n", "")
                code_lines[linenum] = "ctypedef struct " + mystr + ":\n"
                code_lines.pop(i)
            if inc:
                # Anything with parentheses is a function declaration.
                if "(" in code_lines[i]:
                    code_lines[i] = "cdef " + code_lines[i]
                i += 1
        for line in code_lines:
            # Indent the section body; "(void)" is spelled "()" in Cython.
            full_file += line.replace("\n", "\n\t").replace("(void)", "()")
    f = open(os.path.join(directory, pxdname), "w")
    # Cython sources use spaces, not tabs.
    f.write(full_file.replace("\t", " "))
    f.close()
# Externs for the main C modules used by pawpyc.
write_pxd(
    "pawpyc_extern.pxd",
    [
        "utils",
        "projector",
        "pseudoprojector",
        "reader",
        "density",
        "sbt",
        "linalg",
        "radial",
        "momentum",
    ],
)
# Externs for the C test suite.
write_pxd("tests/testc_extern.pxd", ["tests/tests", "utils"])
|
kylebystrom/pawpyseed | docs/colors.py | import re
import sys

# Invert the hex color codes in the file given as argv[1] (in place), plus a
# couple of hard-coded named-color tweaks. Used to build a dark color theme
# for the docs.
filename = sys.argv[1]

# 6-digit hex color codes, any case (e.g. "#1A2b3C").
p = re.compile("#[0-9A-Fa-f]{6}")

with open(filename) as f:
    string = f.read()

res = p.findall(string)

# Per-digit inversion table: '0'<->'F', '1'<->'E', ... built from the
# uppercase digits, then overlaid with lowercase 'a'-'f' -> '5'-'0' so
# lowercase input is also handled (digits always invert to uppercase).
mapping = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F',]
mapping = [item for item in zip(mapping, reversed(mapping))]
mapping = {k: v for k, v in mapping}
mapping2 = {'f': '0', 'e': '1', 'd': '2', 'c': '3', 'b': '4', 'a': '5'}
mapping.update(mapping2)
print(mapping)


def invert_color(color):
    """Return the "#RRGGBB" color with every hex digit inverted.

    Renamed from ``map`` in the original, which shadowed the builtin.
    """
    return '#' + ''.join(mapping[char] for char in color[1:])


for item in res:
    string = string.replace(item, invert_color(item))

# Named-color tweaks applied after inversion.
string = string.replace('color: white', 'color: #000000')
string = string.replace('color: black', 'color: #32CD32')
print(string)

with open(filename, 'w') as f:
    f.write(string)
|
kylebystrom/pawpyseed | pawpyseed/compiler.py | <reponame>kylebystrom/pawpyseed<gh_stars>10-100
import subprocess
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
"""
subprocess.call(("make pawpy_%s" % comp).split())
def compile_core(comp, scilib):
"""
ATTENTION, NOT FINISHED
"""
subprocess.call("make hfc".split())
|
kylebystrom/pawpyseed | pawpyseed/core/rayleigh.py | <filename>pawpyseed/core/rayleigh.py
import numpy as np
from scipy.special import sph_harm, spherical_jn
# Test plane-wave vector; components in units of 2*pi (units of the test
# grid below — presumably arbitrary length units for this sanity check).
k = np.array([0.6, 0.2, 0.3]) * 2 * np.pi
def planewave(coord):
    """Evaluate exp(i k.r) on the given coordinate array.

    ``coord`` is a (3, ...) array of Cartesian coordinates. The extra
    constant factor exp(i k.(1,1,1)) shifts the phase origin.
    """
    # Use the coordinates that were passed in. The original body ignored
    # ``coord`` and read the module-level ``grid`` directly, which made the
    # parameter misleading (behavior is unchanged: the call site passes
    # ``grid``).
    return np.exp(
        1j * (k[0] * coord[0] + k[1] * coord[1] + k[2] * coord[2])
    ) * np.exp(1j * np.dot(k, [1, 1, 1]))
# Spherical-harmonic quantum numbers used by func() and rad_int().
m, l = 1, 1
def func(coord):
    """exp(-r) times the (l, m) spherical harmonic, on a coordinate array."""
    xx, yy, zz = coord[0], coord[1], coord[2]
    radius = np.sqrt(xx ** 2 + yy ** 2 + zz ** 2)
    # nan_to_num guards the r = 0 point, where the angles are undefined.
    polar = np.nan_to_num(np.arccos(zz / radius))
    azimuth = np.nan_to_num(np.arctan2(yy / radius, xx / radius))
    return np.exp(-radius) * sph_harm(m, l, azimuth, polar)
def rad_int(r):
    """Radial Rayleigh-expansion term of <Y_lm e^(-r) | e^(ik.r)>.

    Integrates j_l(|k| r) r^2 e^(-r) over the radial samples ``r`` and
    multiplies by the conjugate spherical harmonic of k's direction and
    the 4*pi*(i^l) Rayleigh-expansion prefactor.
    """
    kmag = np.linalg.norm(k)
    theta = np.nan_to_num(np.arccos(k[2] / kmag))
    phi = np.nan_to_num(np.arctan2(k[1] / kmag, k[0] / kmag))
    vals = spherical_jn(l, kmag * r) * r * r * np.exp(-r)
    # Removed the debug print of the raw integrand array that the original
    # dumped to stdout on every call.
    return (
        np.trapz(vals, r) * np.conj(sph_harm(m, l, phi, theta)) * 4 * np.pi * (1j) ** l
    )
# Sanity check: compare a brute-force 3D overlap integral of a plane wave
# with exp(-r) Y_lm against the radial Rayleigh-expansion result. The two
# printed values should agree (up to grid/truncation error).
x = np.arange(-8, 8, 0.1)
y = np.arange(-8, 8, 0.1)
z = np.arange(-8, 8, 0.1)
x, y, z = np.meshgrid(x, y, z, indexing="ij")
grid = np.array([x, y, z])
w = planewave(grid)
f = func(grid)
integrand = w * np.conj(f)
# Integrate out x, then y, then z (trapezoidal rule along each axis).
temp1 = np.trapz(integrand, x, axis=0)
temp2 = np.trapz(temp1, y[0, :, :], axis=0)
final = np.trapz(temp2, z[0, 0, :], axis=0)
print(final)
r = np.arange(0, 8, 0.1)
integral = rad_int(r)
# Apply the same phase-origin shift used inside planewave().
print(integral * np.exp(1j * np.dot(k, [1, 1, 1])))
|
kylebystrom/pawpyseed | pawpyseed/core/wavefunction.py | ## @package pawpyseed.core.wavefunction
# Base class containing Python classes for parsing files
# and storing and analyzing wavefunction data.
import os
import time
import numpy as np
from pymatgen.io.vasp.inputs import Poscar, Potcar
from pymatgen.io.vasp.outputs import Vasprun
import pawpyseed.core.symmetry as pawpy_symm
from pawpyseed.core import pawpyc
from pawpyseed.core.utils import *
class Pseudopotential:
    """
    Contains important attributes from a VASP pseudopotential file. POTCAR
    "settings" can be read from the pymatgen POTCAR object.
    If you use pymatgen, you can think of this as correlating with
    the PotcarSingle object.

    Note: for the following attributes, 'index' refers to an energy
    quantum number epsilon and angular momentum quantum number l,
    which define one set consisting of a projector function, all electron
    partial waves, and pseudo partial waves.

    Attributes:
        rmax (np.float64): Maximum radius of the projection operators
        grid (np.array): radial grid on which partial waves are defined
        aepotential (np.array): All electron potential defined radially on grid
        aecorecharge (np.array): All electron core charge defined radially
            on grid (i.e. charge due to core, and not valence, electrons)
        kinetic (np.array): Core kinetic energy density, defined radially on grid
        pspotential (np.array): pseudopotential defined on grid
        pscorecharge (np.array): pseudo core charge defined on grid
        ls (list): l quantum number for each index
        pswaves (list of np.array): pseudo partial waves for each index
        aewaves (list of np.array): all electron partial waves for each index
        projgrid (np.array): radial grid on which projector functions are defined
        recipprojs (list of np.array): reciprocal space projection operators
            for each index
        realprojs (list of np.array): real space projection operators
            for each index
    """

    def __init__(self, data):
        """
        Initializer for Pseudopotential.
        Should only be used by CoreRegion.

        Arguments:
            data (str): single-element pseudopotential
                (POTCAR) as a string
        """
        # The POTCAR text is partitioned by literal marker strings that
        # VASP writes between sections. Several markers contain VASP's own
        # typos (e.g. "uccopancies", "non sperical") and must be kept
        # verbatim to match the file contents.
        nonradial, radial = data.split("PAW radial sets", 1)
        partial_waves = radial.split("pseudo wavefunction")
        gridstr, partial_waves = partial_waves[0], partial_waves[1:]
        self.pswaves = []
        self.aewaves = []
        self.recipprojs = []
        self.realprojs = []
        self.nonlocalprojs = []
        self.ls = []
        # Radial-grid section: peel off the grid and the potential/charge
        # subsections (most of which are currently unused, see below).
        auguccstr, gridstr = gridstr.split("grid", 1)
        gridstr, aepotstr = gridstr.split("aepotential", 1)
        aepotstr, corechgstr = aepotstr.split("core charge-density", 1)
        try:
            corechgstr, kenstr = corechgstr.split("kinetic energy-density", 1)
            kenstr, pspotstr = kenstr.split("pspotential", 1)
        except:
            # Older POTCARs lack a kinetic energy-density section.
            kenstr = "0 0"
            corechgstr, pspotstr = corechgstr.split("pspotential", 1)
        pspotstr, pscorechgstr = pspotstr.split("core charge-density (pseudized)", 1)
        self.grid = self.make_nums(gridstr)
        # self.aepotential = self.make_nums(aepotstr)
        # self.aecorecharge = self.make_nums(corechgstr)
        # self.kinetic = self.make_nums(kenstr)
        # self.pspotential = self.make_nums(pspotstr)
        # self.pscorecharge = self.make_nums(pscorechgstr)
        # Augmentation charges ("uccopancies"/"non sperical" are sic).
        augstr, uccstr = auguccstr.split("uccopancies in atom", 1)
        head, augstr = augstr.split("augmentation charges (non sperical)", 1)
        self.augs = self.make_nums(augstr)
        # Each remaining chunk holds one pseudo/all-electron partial-wave pair.
        for pwave in partial_waves:
            lst = pwave.split("ae wavefunction", 1)
            self.pswaves.append(self.make_nums(lst[0]))
            self.aewaves.append(self.make_nums(lst[1]))
        # Non-radial part: projector functions per l channel.
        projstrs = nonradial.split("Non local Part")
        topstr, projstrs = projstrs[0], projstrs[1:]
        # NOTE(review): the fixed -22/-4 offsets assume a fixed-width POTCAR
        # layout for this trailing float — confirm against the POTCAR format.
        self.T = float(topstr[-22:-4])
        topstr, atpschgstr = topstr[:-22].split("atomic pseudo charge-density", 1)
        try:
            topstr, corechgstr = topstr.split("core charge-density (partial)", 1)
            settingstr, localstr = topstr.split("local part", 1)
        except:
            # No partial core charge section present.
            corechgstr = "0 0"
            settingstr, localstr = topstr.split("local part", 1)
        """
        if "gradient corrections used for XC" in localstr:
            localstr, self.gradxc = localstr.split("gradient corrections used for XC", 1)
            self.gradxc = int(self.gradxc)
        else:
            self.gradxc = None
        self.localpart = self.make_nums(localstr)
        self.localnum = self.localpart[0]
        self.localpart = self.localpart[1:]
        self.coredensity = self.make_nums(corechgstr)
        self.atomicdensity = self.make_nums(atpschgstr)
        """
        for projstr in projstrs:
            lst = projstr.split("Reciprocal Space Part")
            nonlocalvals, projs = lst[0], lst[1:]
            # Third whitespace token of the header is the projector rmax.
            self.rmax = self.make_nums(nonlocalvals.split()[2])[0]
            nonlocalvals = self.make_nums(nonlocalvals)
            l = nonlocalvals[0]
            count = nonlocalvals[1]
            self.nonlocalprojs.append(nonlocalvals[2:])
            # Each "Reciprocal Space Part" chunk contains the reciprocal-
            # followed by the real-space projector for one index.
            for proj in projs:
                recipproj, realproj = proj.split("Real Space Part")
                self.recipprojs.append(self.make_nums(recipproj))
                self.realprojs.append(self.make_nums(realproj))
                self.ls.append(l)
        settingstr, projgridstr = settingstr.split("STEP =")
        self.ndata = int(settingstr.split()[-1])
        # projgridstr = projgridstr.split("END")[0]
        # Real-space projector grid: evenly spaced samples from 0 to rmax.
        self.projgrid = (
            np.arange(len(self.realprojs[0])) * self.rmax / len(self.realprojs[0])
        )
        self.step = (self.projgrid[0], self.projgrid[1])

    def make_nums(self, numstring):
        """Parse a whitespace-separated block of numbers into a float64 array."""
        return np.fromstring(numstring, dtype=np.float64, sep=" ")
class CoreRegion:
    """
    Collection of Pseudopotential objects describing the core region of a
    structure.

    Attributes:
        pps (dict of Pseudopotential): keys are element symbols,
            values are Pseudopotential objects
    """

    def __init__(self, potcar):
        """
        Build a CoreRegion from a pymatgen.io.vasp.inputs.Potcar.

        Arguments:
            potcar (pymatgen.io.vasp.inputs.Potcar): Potcar file for
                the VASP calculation

        Returns:
            CoreRegion object based on potcar
        """
        # The trailing 15 characters of each POTCAR entry are dropped
        # before parsing, as the Pseudopotential parser expects.
        self.pps = {
            single.element: Pseudopotential(single.data[:-15])
            for single in potcar
        }
class Wavefunction(pawpyc.CWavefunction):
"""
Class for storing and manipulating all electron wave functions in the PAW
formalism.
Attributes:
structure (pymatgen.core.structure.Structure): stucture of the material
that the wave function describes
cr (CoreRegion): Contains the pseudopotentials, with projectors and
partials waves, for the structure
dim (np.ndarray, length 3): dimension of the FFT grid used by VASP
and therefore for FFTs in this code
band_props (np.ndarray): 4-item array of containing the information
(band gap, cbm, vbm, is_band_gap_direct). This object contains the same
information as pymatgen.io.vasp.outputs.Vasprun.eigenvalue_band_properties
"""
def __init__(self, struct, pwf, cr, dim, symprec=1e-4, setup_projectors=False):
"""
Arguments:
struct (pymatgen.core.Structure): structure that the wavefunction describes
pwf (pawpyc.PWFPointer): holder class for pswf_t and k-points/k-point weights
cr (CoreRegion): Contains the pseudopotentials, with projectors and
partials waves, for the structure
dim (pymatgen.io.vasp.outputs.Outcar OR np.ndarry OR list of length 3):
Outcar object for reading ngf or the dimensions NG* of the FFT grid
symprec (float, 1e-4): precision tolerance for symmetry operations
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
self.band_props = pwf.band_props.copy(order="C")
super().__init__(pwf)
if self.ncl:
raise PAWpyError(
"Pseudowavefunction is noncollinear! Call NCLWavefunction(...) instead"
)
self.structure = struct
self.symprec = symprec
self.cr = cr
self.dim = np.array(dim).astype(np.int32)
if len(dim) != 3:
raise PAWpyError("Grid dimensions must be length 3")
if setup_projectors:
self.check_c_projectors()
def check_band_index(self, b):
if b < 0 or b >= self.nband:
raise ValueError(
"Invalid band {}. Should be in range [{}, {}]".format(
b, 0, self.nband - 1
)
)
def check_kpoint_index(self, k):
if k < 0 or k >= self.nwk:
raise ValueError(
"Invalid kpoint index {}. Should be in range [{}, {}]".format(
k, 0, self.nwk - 1
)
)
def check_spin_index(self, s):
if s < 0 or s >= self.nspin:
raise ValueError(
"Spin must be 0 for non-spin-polarized or 0 or 1 for spin-polarized."
)
def check_bks_spec(self, b, k, s):
self.check_band_index(b)
self.check_kpoint_index(k)
self.check_spin_index(s)
def update_dim(self, dim):
self.dim = np.array(dim, dtype=np.int32)
self.update_dimv(dim)
def desymmetrized_copy(
self, allkpts=None, weights=None, symprec=None, time_reversal_symmetry=True
):
"""
Returns a copy of self with a k-point mesh that is not reduced
using crystal symmetry.
Arguments:
allkpts (optional, None): An optional k-point mesh to map
onto. Used by the Projector class for some cases
weights (optional, None): If allkpts is not None, weights
should contain the k-point weights of each k-point,
with the sum normalized to 1.
symprec: Symmetry precision to use when determining the space group.
If None, the symmetry precision used to generate the
Wavefunction will be used (the default).
time_reversal_symmetry: Whether time reversal symmetry is used.
"""
if not symprec:
symprec = self.symprec
pwf = self._desymmetrized_pwf(
self.structure,
self.band_props,
allkpts,
weights,
symprec,
time_reversal_symmetry,
)
new_wf = Wavefunction(self.structure, pwf, self.cr, self.dim, symprec=symprec)
return new_wf
@staticmethod
def from_files(
struct="CONTCAR",
wavecar="WAVECAR",
cr="POTCAR",
vr="vasprun.xml",
setup_projectors=False,
):
"""
Construct a Wavefunction object from file paths.
Arguments:
struct (str): VASP POSCAR or CONTCAR file path
wavecar (str): VASP WAVECAR file path
cr (str): VASP POTCAR file path
vr (str): VASP vasprun file path
outcar (str): VASP OUTCAR file path
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
for fname in [struct, wavecar, cr, vr]:
if not os.path.isfile(fname):
raise FileNotFoundError(f"File {fname} does not exist.")
vr = Vasprun(vr)
dim = np.array(
[vr.parameters["NGX"], vr.parameters["NGY"], vr.parameters["NGZ"]]
)
symprec = vr.parameters["SYMPREC"]
pwf = pawpyc.PWFPointer(wavecar, vr)
return Wavefunction(
Poscar.from_file(struct).structure,
pwf,
CoreRegion(Potcar.from_file(cr)),
dim,
symprec,
setup_projectors,
)
@staticmethod
def from_directory(path, setup_projectors=False):
"""
Assumes VASP output has the default filenames and is located
in the directory specificed by path.
Arguments:
path (str): VASP output directory
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
filepaths = []
for d in ["CONTCAR", "WAVECAR", "POTCAR", "vasprun.xml"]:
filepaths.append(str(os.path.join(path, d)))
args = filepaths + [setup_projectors]
return Wavefunction.from_files(*args)
@staticmethod
def from_atomate_directory(path, setup_projectors=False):
"""
Assumes VASP output has the default filenames and is located
in the directory specificed by path. Checks for
gzipped files created by atomate
Arguments:
path (str): VASP output directory
setup_projectors (bool, False): Whether to set up the core region
components of the wavefunctions. Pawpyseed will set up the projectors
automatically when they are first needed, so this generally
can be left as False.
Returns:
Wavefunction object
"""
files = ["CONTCAR", "WAVECAR", "POTCAR", "vasprun.xml"]
paths = []
for file in files:
filepat = os.path.join(path, file + ".relax2.gz")
if not os.path.exists(filepat):
filepat = os.path.join(path, file + ".relax1.gz")
if not os.path.exists(filepat):
filepat = os.path.join(path, file + ".gz")
if not os.path.exists(filepat):
filepat = os.path.join(path, file)
if not os.path.exists(filepat):
print(f"Could not find {file}! Skipping this defect...")
return False
paths.append(filepat)
args = paths + [setup_projectors]
wf = Wavefunction.from_files(*args)
return wf
def _make_c_projectors(self):
"""
Uses the CoreRegion objects in self
to construct C representations of the projectors and partial waves
for a structure. Also assigns numerical labels for each element and
setups up a list of indices and positions which can be easily converted
to C lists for projection routines.
"""
pps = {}
labels = {}
label = 0
for e in self.cr.pps:
pps[label] = self.cr.pps[e]
labels[e] = label
label += 1
nums = np.array([labels[el(s)] for s in self.structure], dtype=np.int32)
coords = np.array([], dtype=np.float64)
self.num_sites = len(self.structure)
self.num_elems = len(pps)
for s in self.structure:
coords = np.append(coords, s.frac_coords)
grid_encut = (np.pi * self.dim / self.structure.lattice.abc) ** 2 / 0.262
self._c_projector_setup(
self.num_elems, self.num_sites, max(grid_encut), nums, coords, self.dim, pps
)
def check_c_projectors(self):
"""
Check to see if the projector functions have been read in and set up.
If not, do so.
"""
if not self.projector_owner:
start = time.monotonic()
self._make_c_projectors()
end = time.monotonic()
print(
"--------------\nran setup_projections in %f seconds\n---------------"
% (end - start)
)
def get_state_realspace(self, b, k, s, dim=None, remove_phase=False):
"""
Returns the real and imaginary parts of a given band.
Args:
b (int): band number
k (int): kpoint number
s (int): spin number
dim (numpy array of 3 ints): dimensions of the FFT grid
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
return self._get_realspace_state(b, k, s, remove_phase)
def get_state_realspace_density(self, b, k, s, dim=None):
"""
Returns the real and imaginary parts of a given band.
Args:
b (int): band number
k (int): kpoint number
s (int): spin number
dim (numpy array of 3 ints): dimensions of the FFT grid
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim) // 2)
return self._get_realspace_state_density(b, k, s)
def get_realspace_density(self, dim=None, bands=None):
"""
Returns the all electron charge density.
Args:
dim (numpy array of 3 ints, None): dimensions of the FFT grid
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with real double values for the all electron charge density
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim) // 2)
return self._get_realspace_density()
def _convert_to_vasp_volumetric(self, filename, dim):
"""
Utility function to convert pawpyseed volumetric
output to VASP volumetric output.
"""
# from pymatgen VolumetricData class
p = Poscar(self.structure)
lines = filename + "\n"
lines += " 1.00000000000000\n"
latt = self.structure.lattice.matrix
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[0, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[1, :])
lines += " %12.6f%12.6f%12.6f\n" % tuple(latt[2, :])
lines += "".join(["%5s" % s for s in p.site_symbols]) + "\n"
lines += "".join(["%6d" % x for x in p.natoms]) + "\n"
lines += "Direct\n"
for site in self.structure:
lines += "%10.6f%10.6f%10.6f\n" % tuple(site.frac_coords)
lines += " \n"
f = open(filename)
nums = f.read()
f.close()
f = open(filename, "w")
dimstr = "%d %d %d\n" % (dim[0], dim[1], dim[2])
# pos = Poscar(self.structure, velocities = None)
# posstr = pos.get_string() + '\n'
f.write(lines + dimstr + nums)
f.close()
def write_state_realspace(
self, b, k, s, fileprefix="", dim=None, scale=1, remove_phase=False
):
"""
Writes the real and imaginary parts of a given band to two files,
prefixed by fileprefix
Args:
b (int): band number (0-indexed!)
k (int): kpoint number (0-indexed!)
s (int): spin number (0-indexed!)
fileprefix (string, ""): first part of the file name
dim (numpy array of 3 ints, None): dimensions of the FFT grid
scale (scalar, 1): number to multiply the realspace wavefunction by.
For example, VASP multiplies charge density by the volume
of the structure.
remove_phase (False): If True, removes the e^(ikr) phase
from the wavefunction (this does not necessarily mean
the wavefunction is real). This is useful if you want
to visualize the wavefunction because the e^(ikr) phase
makes the wavefunction non-periodic
Returns:
A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
with complex double values for the realspace wavefunction
The wavefunction is written in two files with z the slow index.
"""
self.check_c_projectors()
if dim is not None:
self.update_dim(np.array(dim))
filename_base = "%sB%dK%dS%d" % (fileprefix, b, k, s)
filename1 = "%s_REAL.vasp" % filename_base
filename2 = "%s_IMAG.vasp" % filename_base
res = self._write_realspace_state(
filename1, filename2, scale, b, k, s, remove_phase
)
self._convert_to_vasp_volumetric(filename1, self.dim)
self._convert_to_vasp_volumetric(filename2, self.dim)
return res
    def write_density_realspace(
        self, filename="PYAECCAR.vasp", dim=None, scale=1, bands=None
    ):
        """
        Writes the all-electron charge density on a real-space grid to a
        VASP-style volumetric file.

        Args:
            filename (string, "PYAECCAR.vasp"): name of the output file
            dim (numpy array of 3 ints, None): dimensions of the FFT grid
            scale (scalar, 1): number to multiply the realspace density by.
                For example, VASP multiplies charge density by the volume
                of the structure.
            bands (int or [int], None): Only calculate the density for a specific
                band or set of bands

        Returns:
            A 3D array (indexed by x,y,z where x,y,z are fractional coordinates)
            with the realspace density.
            The charge density is written with z the slow index.
        """
        self.check_c_projectors()
        if dim is not None:
            # NOTE(review): the wavefunction grid is set to dim // 2 while the
            # volumetric writer receives self.dim * 2 — presumably the density
            # grid is twice as dense as the wavefunction grid; confirm against
            # _write_realspace_density.
            self.update_dim(np.array(dim) // 2)
        res = self._write_realspace_density(filename, scale, bands)
        self._convert_to_vasp_volumetric(filename, self.dim * 2)
        return res
def get_nosym_kpoints(
self, init_kpts=None, symprec=None, gen_trsym=True, fil_trsym=True
):
"""
Helper function to get a non-symmetry-reduced k-point
mesh based on the symmetry-reduced mesh of self.
"""
if symprec == None:
symprec = self.symprec
return pawpy_symm.get_nosym_kpoints(
kpts, self.structure, init_kpts, symprec, gen_trsym, fil_trsym
)
def get_kpt_mapping(self, allkpts, symprec=None, gen_trsym=True):
"""
Helper function to find the mappings from self.kpts to
allkpts using the symmetry operations of self.structure
"""
if symprec == None:
symprec = self.symprec
return pawpy_symm.get_kpt_mapping(
allkpts, self.kpts, self.structure, symprec, gen_trsym
)
|
kylebystrom/pawpyseed | pawpyseed/analysis/defect_composition.py | ## File presenting the PawpyData class
# and its children for analysis of pawpyseed
# output primarily from core.projector.Projector
import os
import matplotlib.pyplot as plt
import numpy as np
import yaml
from pymatgen.electronic_structure.core import Spin
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.io.vasp.outputs import Vasprun
class PawpyData:
    """
    Base container for pawpyseed analysis results: a crystal structure,
    an arbitrary data payload, and optional DOS/band-edge information.
    Supports round-tripping through dictionaries and yaml files.
    """

    def __init__(self, structure, data, dos=None, vbm=None, cbm=None):
        """
        Arguments:
            structure (pymatgen.core.structure.Structure or str): crystal
                structure, or a POSCAR string to be parsed (this is how
                as_dict/from_dict round-trip the structure)
            data: Whatever data is stored
            dos (pymatgen.electronic_structure.dos.DOS or list, None): A pymatgen
                density of states or a list containing: 1) energies, 2) density
                of states values at those energies, 3) the Fermi level.
            vbm (float, None): valence band maximum
            cbm (float, None): conduction band minimum
        """
        if isinstance(structure, str):
            structure = Poscar.from_string(structure).structure
        if dos is None:
            self.energies = None
            self.densities = None
            self.efermi = None
        elif isinstance(dos, list):
            # [energies, densities, efermi] triple.
            self.energies = dos[0]
            self.densities = dos[1]
            self.efermi = dos[2]
        else:
            # pymatgen Dos object: average the two spin channels.
            self.energies = dos.energies
            self.densities = (dos.densities[Spin.up] + dos.densities[Spin.down]) / 2
            self.efermi = dos.efermi
        self.structure = structure
        self.data = data
        self.cbm = cbm
        self.vbm = vbm
        if (cbm is not None) and (vbm is not None):
            self.bandgap = max(0, cbm - vbm)
        else:
            self.bandgap = None

    def set_band_properties(self, vbm, cbm):
        """
        Set the VBM, CBM, and band gap.

        Arguments:
            vbm (float): valence band maximum
            cbm (float): conduction band minimum
        """
        # Fixed: the original signature omitted `self`, so calling this on
        # an instance misbound the arguments and raised a TypeError.
        self.bandgap = max(0, cbm - vbm)
        self.cbm = cbm
        self.vbm = vbm

    def as_dict(self):
        """
        Returns a representation of the PawpyData
        as a dictionary.
        """
        data = {}
        # The structure is stored as a POSCAR string; __init__ parses it back.
        data["structure"] = Poscar(self.structure).get_string()
        data["energies"] = self.energies
        data["densities"] = self.densities
        data["data"] = self.data
        data["vbm"] = self.vbm
        data["cbm"] = self.cbm
        data["efermi"] = self.efermi
        return data

    def write_yaml(self, filename):
        """
        Write the PawpyData as a yaml file
        called filename
        """
        data = self.as_dict()
        # Context manager guarantees the file is closed even if dump fails.
        with open(filename, "w") as f:
            yaml.dump(data, f)

    @classmethod
    def from_dict(cls, data):
        """
        Takes the dictionary--data--and
        returns an instance of cls.
        """
        return cls(
            data["structure"],
            data["data"],
            dos=[data["energies"], data["densities"], data["efermi"]],
            vbm=data["vbm"],
            cbm=data["cbm"],
        )

    @classmethod
    def from_yaml(cls, filename):
        """
        Reads a PawpyData instance from
        a file called filename.
        """
        with open(filename) as f:
            data = yaml.load(f.read().encode("utf-8"), Loader=yaml.Loader)
        return cls.from_dict(data)
class BulkCharacter(PawpyData):
    """
    The data member for the BulkCharacter is dictionary of form
    {band_index : (v, c)}, where v and c are the valence and
    conduction character of the band, respectively.
    """

    def __init__(
        self,
        structure,
        data,
        energy_levels=None,
        dos=None,
        vbm=None,
        cbm=None,
        metadata=None,
    ):
        """
        Arguments:
            structure (pymatgen.core.structure.Structure): crystal structure
            energy_levels (list of list): list of energy levels at the different
                k-points and spins in each band. Returned by Projector.defect_band_analysis
            data: {band_index : (v, c)}, where v and c are the valence and
                conduction character of the band, respectively.
                Returned by Projector.defect_band_analysis
            dos (pymatgen.electronic_structure.dos.DOS or list, None): A pymatgen
                density of states or a list containing: 1) energies, 2) density
                of states values at those energies, 3) the Fermi level.
            vbm (float, None): valence band maximum
            cbm (float, None): conduction band minimum
            metadata (dict, None): optional extras; recognized keys are
                "kws" (k-point weights) and "nspin" (number of spin channels)
        """
        self.energy_levels = energy_levels
        # k-point weights and spin count are optional and pulled from metadata.
        self.kws = None
        self.nspin = None
        if metadata is not None:
            if "kws" in metadata:
                self.kws = metadata["kws"]
            if "nspin" in metadata:
                self.nspin = metadata["nspin"]
        self.metadata = metadata
        super().__init__(structure, data, dos, vbm, cbm)

    def as_dict(self):
        """Serialize to a dictionary (extends PawpyData.as_dict)."""
        data = super().as_dict()
        data["energy_levels"] = self.energy_levels
        data["metadata"] = {}
        if self.nspin is not None:
            data["metadata"]["nspin"] = self.nspin
        if self.kws is not None:
            data["metadata"]["kws"] = self.kws
        return data

    @classmethod
    def from_dict(cls, data):
        """
        Takes the dictionary--data--and
        returns a BulkCharacter instance.
        """
        if "metadata" in data:
            metadata = data["metadata"]
        else:
            metadata = None
        return cls(
            data["structure"],
            data["data"],
            energy_levels=data["energy_levels"],
            dos=[data["energies"], data["densities"], data["efermi"]],
            vbm=data["vbm"],
            cbm=data["cbm"],
            metadata=metadata,
        )

    def plot(self, name, title=None, spinpol=False):
        """
        Plot the bulk character data. If the energy levels are available,
        those are plotted for reference. Otherwise, if the dos is available,
        it is plotted for reference.

        Arguments:
            name (str): Name of the file to which the plot is saved.
                Spaces will be replaced with underscores.
            title (str, None): Title of the plot. Defaults to name,
                so it is recommended to set this.
            spinpol (bool, False): Show the spin polarized valence
                and conduction character. Only works if the VASP
                calculation was spin polarized
        """
        # Fall back to an empty weight array when no k-point weights are stored.
        kws = self.kws if (self.kws is not None) else np.array([])
        if spinpol:
            if self.nspin == 2:
                spin = 2
            else:
                raise ValueError("Must verify that nspin==2 to use spinpol")
        else:
            spin = 1
            # Without spin polarization the two spin channels share weights,
            # so the weight list is doubled to cover both channels.
            if self.nspin == 2:
                kws = np.append(kws, kws)
            elif (
                self.nspin == None
                and len(kws) == len(list(self.energy_levels.values())[0]) // 2
            ):
                kws = np.append(kws, kws)
        # NOTE(review): the `// spin` below divides the *boolean* comparison
        # result, not the length — the parentheses look misplaced. The
        # intended check is presumably len(kws) != len(levels) // spin;
        # confirm before relying on this validation.
        if (kws is not None) and (
            len(kws) != len(list(self.energy_levels.values())[0])
        ) // spin:
            raise ValueError("kws is the wrong shape")
        if title == None:
            title = name
        # Flatten the {band: (v, c)} data into parallel arrays; each band
        # contributes two valence and two conduction entries (one per spin).
        bs = []
        vs = []
        cs = []
        for b in self.data:
            bs.append(b)
            vs.append(self.data[b][0][0])
            vs.append(self.data[b][0][1])
            cs.append(self.data[b][1][0])
            cs.append(self.data[b][1][1])
        bandgap = self.bandgap
        # Center the band indices about their mean for plotting.
        bs = np.array(bs) - np.mean(bs)
        xticks = bs[:]
        cs = np.array(cs)
        vs = np.array(vs)
        # Choose the layout: character only, character + DOS, or
        # character + energy-level diagram.
        if self.energy_levels == None:
            if self.energies is None or self.efermi is None or self.densities is None:
                fig, ax1 = plt.subplots()
            else:
                fig, (ax1, ax3) = plt.subplots(
                    2, 1, gridspec_kw={"height_ratios": [3, 1]}, figsize=[6.4, 6.4]
                )
        else:
            fig, (ax1, ax3) = plt.subplots(
                2, 1, gridspec_kw={"height_ratios": [1, 1]}, figsize=[6.4, 8]
            )
        # Top panel: valence character (blue, from the bottom) and conduction
        # character (red, inverted axis from the top), one pair of bars per band.
        ax1.set_xlabel("band", fontsize=18)
        ax1.set_xticks(xticks)
        ax1.set_ylabel("valence", color="b", fontsize=18)
        ax1.bar(bs - 0.2, vs[::2], width=0.4, color="b")
        ax1.bar(bs + 0.2, vs[1::2], width=0.4, color="b")
        ax1.set_ylim(0, 1)
        ax2 = ax1.twinx()
        ax2.set_ylabel("conduction", color="r", fontsize=18)
        ax2.bar(bs - 0.2, cs[::2], width=0.4, color="r")
        ax2.bar(bs + 0.2, cs[1::2], width=0.4, color="r")
        ax2.set_ylim(0, 1)
        ax2.invert_yaxis()
        plt.title(title + " band character", fontsize=20)
        # plt.savefig('BAND_'+name)
        if self.energy_levels == None:
            # Bottom panel (when DOS data is available): total DOS near E_F.
            if not (
                self.energies is None or self.efermi is None or self.densities is None
            ):
                ax3.plot(self.energies - self.efermi, self.densities)
                ax3.set_xlabel("Energy (eV)", fontsize=18)
                ax3.set_ylabel("Total DOS", fontsize=18)
                ax3.set_xlim(-2, 2)
                ax3.set_ylim(0, max(self.densities))
        else:
            # Bottom panel: per-band energy levels, colored by occupation.
            bs = list(self.energy_levels.keys())
            bmean = np.mean(bs)
            cmap = plt.get_cmap("rainbow")
            for b in self.energy_levels:
                i = 0
                delta = 0.8 / spin  # len(self.energy_levels[b])
                enlists = []
                occlists = []
                # Split the per-band levels into one (energy, occupation)
                # list per spin channel.
                for s in range(spin):
                    length = len(self.energy_levels[b])
                    interval = (s * length // spin, (s + 1) * length // spin)
                    enlists.append(
                        [self.energy_levels[b][t][0] for t in range(*interval)]
                    )
                    occlists.append(
                        [self.energy_levels[b][t][1] for t in range(*interval)]
                    )
                for i, endat in enumerate(zip(enlists, occlists)):
                    enlist, occlist = endat
                    # Color by mean occupation (occupied vs empty levels).
                    color = cmap(1 - np.mean(occlist))
                    disp = i * delta - 0.4
                    span = [b - bmean + disp, b - bmean + disp + delta]
                    # Weight-average the level energy over k-points when
                    # weights are available.
                    if self.kws is None:
                        en = np.mean(enlist)
                    else:
                        en = np.dot(enlist, kws)
                    # NOTE(review): the next line builds a tuple and discards
                    # it — looks like leftover error-bar code.
                    (en - min(enlist), max(enlist) - en)
                    # Grey bar spans the min-max energy range of the level.
                    ax3.bar(
                        b - bmean + disp + delta / 2,
                        max(enlist) - min(enlist),
                        width=delta,
                        bottom=min(enlist) - self.efermi,
                        color="0.8",
                    )
                    ax3.plot(span, [en - self.efermi] * 2, color=color)
                # for en, occ in self.energy_levels[b]:
                # 	color = cmap(1-occ)
                # 	disp = i * delta - 0.4
                # 	span = [b-bmean+disp, b-bmean+disp+delta]
                # 	ax3.plot(span,
                # 			[en - self.efermi] * 2,
                # 			color = color)
                # 	i += 1
            # Reference lines at the VBM (0) and CBM of the bulk.
            if self.vbm != None and self.cbm != None:
                bmin = min(bs) - bmean - 0.5
                bmax = max(bs) - bmean + 0.5
                ax3.plot([bmin, bmax], [0, 0], color="black")
                ax3.plot(
                    [bmin, bmax],
                    [self.cbm - self.vbm, self.cbm - self.vbm],
                    color="black",
                )
            elif bandgap != None:
                bmin = min(bs) - bmean - 0.5
                bmax = max(bs) - bmean + 0.5
                ax3.plot([bmin, bmax], [0, 0], color="black")
                ax3.plot([bmin, bmax], [bandgap, bandgap], color="black")
            ax3.set_xlabel("band", fontsize=18)
            ax3.set_xticks(xticks)
            ax3.set_ylabel("Energy (eV)", fontsize=18)
        plt.tight_layout()
        plt.savefig(name.replace(" ", "_"))

    @staticmethod
    def makeit(generator):
        # Example:
        # >>> def_lst = ['charge_1', 'charge_0', 'charge_-1']
        # >>> generator = Projector.setup_multiple_protections('bulk', def_lst)
        # >>> objs = BulkComposition.makeit(generator)
        #
        # Build one BulkCharacter per (directory, Projector) pair yielded by
        # the generator, reading band edges and DOS from each vasprun.xml.
        bcs = {}
        for wf_dir, pr in generator:
            vr = Vasprun(os.path.join(wf_dir, "vasprun.xml"))
            bg, cbm, vbm, _ = vr.eigenvalue_band_properties
            dos = vr.tdos
            data, energy_levels = pr.defect_band_analysis(
                num_above_ef=5, num_below_ef=5, spinpol=True, return_energies=True
            )
            bcs[wf_dir] = BulkCharacter(
                pr.wf.structure,
                data,
                energy_levels=energy_levels,
                dos=dos,
                vbm=vbm,
                cbm=cbm,
                metadata={"nspin": pr.wf.nspin, "kws": pr.wf.kws},
            )
        return bcs
class BasisExpansion(PawpyData):
    """
    The data member for the BasisExpansion is 2D array of shape
    (wf.nband, basis.nband * basis.nwk * basis.nspin).
    Each item is the projection of a band of wf onto a band of
    basis for a given k-point index and spin index.
    """

    @staticmethod
    def makeit(generator):
        # Example:
        # >>> def_lst = ['charge_1', 'charge_0', 'charge_-1']
        # >>> generator = Projector.setup_multiple_protections('bulk', def_lst)
        # OR
        # >>> generator = Projector.setup_multiple_projections(*pycdt_dirs('.'))
        #
        # >>> objs = BasisExpansion.makeit(generator)
        results = {}
        for calc_dir, projector in generator:
            run = Vasprun(os.path.join(calc_dir, "vasprun.xml"))
            _gap, cbm, vbm, _direct = run.eigenvalue_band_properties
            basis = projector.basis
            num_bands = projector.wf.nband
            num_basis_states = basis.nband * basis.nwk * basis.nspin
            # One row per band of the defect wavefunction, one column per
            # (band, k-point, spin) state of the bulk basis.
            coeffs = np.zeros(
                (num_bands, num_basis_states),
                dtype=np.complex128,
            )
            for band_index in range(num_bands):
                coeffs[band_index, :] = projector.single_band_projection(band_index)
            results[calc_dir] = BasisExpansion(
                projector.wf.structure, coeffs, dos=run.tdos, vbm=vbm, cbm=cbm
            )
        return results
def pycdt_dirs(top_dir):
    """
    Walk a PyCDT-style defect workflow directory tree and return the bulk
    calculation directory together with the list of defect calculation
    directories (any directory containing an OUTCAR, excluding the bulk
    and dielectric runs).
    """
    bulk_path = os.path.join(top_dir, "bulk")
    defect_dirs = []
    for root, _dirs, files in os.walk(top_dir):
        # Skip the bulk reference and dielectric calculations.
        if "bulk" in root or "dielectric" in root:
            continue
        # A directory with an OUTCAR is a (finished) VASP run.
        if "OUTCAR" in files:
            defect_dirs.append(root)
    return bulk_path, defect_dirs
|
kylebystrom/pawpyseed | pawpyseed/core/quadrature.py | <reponame>kylebystrom/pawpyseed
import numpy as np

# Highest Gauss-Legendre order tabulated; orders 3..MAXSIZE are generated.
MAXSIZE = 100

# Precompute (points, weights) for each order.
x = []
for i in range(3, MAXSIZE + 1):
    x.append(np.polynomial.legendre.leggauss(i))

# Emit the tables as C array definitions. Rows shorter than MAXSIZE + 1
# are zero-padded by the C initializer rules.
with open("quadrature.c", "w") as f:
    f.write('#include "quadrature.h"\n\n')
    f.write("double QUADRATURE_POINTS[%d][%d] = {\n" % (MAXSIZE - 2, MAXSIZE + 1))
    for i in range(MAXSIZE - 2):
        print(i)
        currstr = str(x[i][0].tolist()).replace("[", "{").replace("]", "}")
        f.write("\t" + currstr)
        # Fixed: the original compared i != MAXSIZE, which was always true,
        # so every row (including the last) got a trailing comma.
        if i != MAXSIZE - 3:
            f.write(",\n")
    # Fixed: array definitions must be terminated with a semicolon to be
    # valid C (the companion gaunt.py generator appends ";" the same way).
    f.write("\n};\n\n")
    f.write("double QUADRATURE_WEIGHTS[%d][%d] = {\n" % (MAXSIZE - 2, MAXSIZE + 1))
    for i in range(MAXSIZE - 2):
        currstr = str(x[i][1].tolist()).replace("[", "{").replace("]", "}")
        f.write("\t" + currstr)
        if i != MAXSIZE - 3:
            f.write(",\n")
    f.write("\n};\n")
|
kylebystrom/pawpyseed | pawpyseed/core/tests/test_momentum.py | import os
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal, assert_raises
class PawpyTestError(Exception):
    """
    Class for handling errors that occur during execution
    of Python functions in pawpyseed
    """

    def __init__(self, msg):
        # Pass the message to Exception so that str(e) and traceback output
        # show it (the original stored it only on self.msg, leaving str(e)
        # empty).
        super().__init__(msg)
        self.msg = msg
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
from pawpyseed.core.momentum import MomentumMatrix
from pawpyseed.core.noncollinear import NCLWavefunction
from pawpyseed.core.wavefunction import Wavefunction
class TestMomentumMatrix:
    """Integration tests for pawpyseed.core.momentum.MomentumMatrix."""

    def setup(self):
        # Run from the repository test_files directory, where the VASP
        # outputs (WAVECAR, vasprun.xml, ...) used by the tests live.
        self.currdir = os.getcwd()
        os.chdir(os.path.join(MODULE_DIR, "../../../test_files"))
        self.initialize_wf_and_mm()

    def initialize_wf_and_mm(self):
        # Build FFT-based reference representations of band 0 / k-point 0 /
        # spin 0 to compare against the MomentumMatrix results.
        SIZE = 60
        self.wf = Wavefunction.from_directory(".")
        vol = self.wf.structure.volume
        self.realspace_wf = self.wf.get_state_realspace(
            0, 0, 0, dim=(SIZE, SIZE, SIZE), remove_phase=True
        )
        # self.realspace_chg = np.abs(self.realspace_wf)**2
        self.realspace_chg = self.wf.get_state_realspace_density(
            0, 0, 0, dim=(SIZE, SIZE, SIZE)
        )
        # FFT normalization: divide by the grid size and scale by sqrt(vol)
        # (wavefunction) or vol (density) to match plane-wave conventions.
        self.recipspace_wf = np.fft.fftn(self.realspace_wf) / SIZE ** 3 * np.sqrt(vol)
        self.recipspace_chg = np.fft.fftn(self.realspace_chg) / SIZE ** 3 * vol
        # Three matrices: one with a large explicit cutoff, one with the
        # default cutoff, one with the wavefunction's own cutoff.
        self.mm_real = MomentumMatrix(self.wf, encut=3000)
        self.mm_direct = MomentumMatrix(self.wf)
        self.mm_direct2 = MomentumMatrix(self.wf, encut=self.wf.encut)
        # Noncollinear reference: two spinor components per state.
        self.ncl_wf = NCLWavefunction.from_directory("noncollinear")
        self.ncl_realspace_wf = self.ncl_wf.get_state_realspace(
            0, 0, 0, dim=(SIZE, SIZE, SIZE), remove_phase=True
        )
        self.ncl_recipspace_wf = (
            np.fft.fftn(self.ncl_realspace_wf[0]) / SIZE ** 3 * np.sqrt(vol),
            np.fft.fftn(self.ncl_realspace_wf[1]) / SIZE ** 3 * np.sqrt(vol),
        )

    def teardown(self):
        # Restore the working directory changed in setup().
        os.chdir(self.currdir)

    def test_ncl_transform(self):
        # The total spinor norm of a normalized state should be 1.
        chg = np.sum(np.abs(self.ncl_recipspace_wf[0]) ** 2) + np.sum(
            np.abs(self.ncl_recipspace_wf[1]) ** 2
        )
        assert_array_almost_equal(chg, 1, 3)

    def test_encut_insensitivity(self):
        # The low-G matrix elements should not depend on the cutoff choice.
        res = self.mm_direct.get_momentum_matrix_elems(0, 0, 0, 0, 0, 0)
        res2 = self.mm_direct2.get_momentum_matrix_elems(0, 0, 0, 0, 0, 0)
        assert_almost_equal(res[0], 1, 4)
        assert_almost_equal(res2[0], 1, 4)
        assert_almost_equal(res[:6], res2[:6], 7)

    def test_get_momentum_matrix_elems(self):
        # Diagonal (same-state) matrix elements should match the FFT of the
        # state density at small G-vectors.
        res = self.mm_direct.get_momentum_matrix_elems(0, 0, 0, 0, 0, 0)
        grid = self.mm_direct.momentum_grid
        for i in range(grid.shape[0]):
            if (np.abs(grid[i]) < 3).all():
                # print(grid[i], res[i], self.recipspace_chg[grid[i][0],grid[i][1],grid[i][2]])
                assert_almost_equal(
                    res[i], self.recipspace_chg[grid[i][0], grid[i][1], grid[i][2]], 3
                )
        # Out-of-range indices must raise.
        with assert_raises(ValueError):
            self.mm_direct.get_momentum_matrix_elems(0, 0, 0, 0, -1, 0)

    def test_get_reciprocal_fullfw(self):
        # The reconstructed plane-wave coefficients should match the FFT of
        # the real-space state at small G-vectors.
        res = self.mm_real.get_reciprocal_fullfw(0, 0, 0)
        print("check size", np.sum(np.abs(res) ** 2))
        grid = self.mm_real.momentum_grid
        for i in range(grid.shape[0]):
            if (np.abs(grid[i]) < 2).all():
                # print(grid[i], res[i], self.recipspace_wf[grid[i][0],grid[i][1],grid[i][2]])
                assert_almost_equal(
                    res[i], self.recipspace_wf[grid[i][0], grid[i][1], grid[i][2]], 3
                )
        with assert_raises(ValueError):
            self.mm_real.get_reciprocal_fullfw(50, 0, 0)

    def test_g_from_wf(self):
        # Single G-vector evaluation should agree with the density FFT.
        grid = self.mm_real.momentum_grid
        for i in range(grid.shape[0]):
            if (np.abs(grid[i]) < 2).all():
                # print(grid[i], self.mm_real.g_from_wf(0,0,0,0,0,0,grid[i]), self.recipspace_chg[grid[i][0],grid[i][1],grid[i][2]])
                assert_almost_equal(
                    self.mm_real.g_from_wf(0, 0, 0, 0, 0, 0, grid[i]),
                    self.recipspace_chg[grid[i][0], grid[i][1], grid[i][2]],
                    3,
                )
        with assert_raises(ValueError):
            self.mm_real.g_from_wf(100, 0, 0, 0, 0, 0, [0, 0, 0])
|
kylebystrom/pawpyseed | pawpyseed/analysis/tests/test_defect_composition.py | <reponame>kylebystrom/pawpyseed<gh_stars>10-100
import os
from pawpyseed.analysis.defect_composition import *
from pawpyseed.core.projector import Projector
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class PawpyTestError(Exception):
    """
    Class for handling errors that occur during execution
    of Python functions in pawpyseed
    """

    def __init__(self, msg):
        # Pass the message to Exception so str(e) and tracebacks show it
        # (the original only stored it on self.msg).
        super().__init__(msg)
        self.msg = msg
class TestBulkCharacter:
    """Integration tests for BulkCharacter construction, plotting, and yaml I/O."""

    def setup(self):
        # Run from the repository test_files directory; the structure is
        # projected onto itself (bulk == "defect"), so no real defect data
        # is needed.
        os.chdir(os.path.join(MODULE_DIR, "../../../test_files"))
        generator = Projector.setup_multiple_projections(".", [".", "."])
        self.bcs = BulkCharacter.makeit(generator)

    def teardown(self):
        pass

    def test_plot(self):
        # Smoke-test both plot modes (no output validation).
        self.bcs["."].plot("tst")
        self.bcs["."].plot("tst", spinpol=True)

    def test_read_write(self):
        # Round-trip through yaml and verify every field survives.
        self.bcs["."].write_yaml("test_file.yaml")
        tst = BulkCharacter.from_yaml("test_file.yaml")
        assert type(tst.structure) == type(self.bcs["."].structure)
        assert tst.structure == self.bcs["."].structure
        assert (tst.energies == self.bcs["."].energies).all()
        assert (tst.densities == self.bcs["."].densities).all()
        assert tst.efermi == self.bcs["."].efermi
        assert tst.data == self.bcs["."].data
        assert tst.vbm == self.bcs["."].vbm
        assert tst.cbm == self.bcs["."].cbm
        assert tst.energy_levels == self.bcs["."].energy_levels
        # The test fixture is spin-polarized with two equal-weight k-points.
        assert tst.nspin == 2
        assert tst.kws[0] == 0.5
        assert tst.kws[1] == 0.5
class TestBasisExpansion:
    """Integration tests for BasisExpansion construction and yaml I/O."""

    def setup(self):
        # Project the test structure onto itself, as in TestBulkCharacter.
        os.chdir(os.path.join(MODULE_DIR, "../../../test_files"))
        generator = Projector.setup_multiple_projections(".", [".", "."])
        self.bes = BasisExpansion.makeit(generator)

    def teardown(self):
        pass

    def test_read_write(self):
        # Round-trip through yaml and verify every field survives.
        self.bes["."].write_yaml("test_file.yaml")
        tst = BasisExpansion.from_yaml("test_file.yaml")
        assert type(tst.structure) == type(self.bes["."].structure)
        assert tst.structure == self.bes["."].structure
        assert (tst.energies == self.bes["."].energies).all()
        assert (tst.densities == self.bes["."].densities).all()
        assert tst.efermi == self.bes["."].efermi
        # data is the 2D expansion-coefficient array, hence elementwise check.
        assert (tst.data == self.bes["."].data).all()
        assert tst.vbm == self.bes["."].vbm
        assert tst.cbm == self.bes["."].cbm
|
kylebystrom/pawpyseed | pawpyseed/core/gaunt.py | """
\file
Python script for generating Gaunt coefficients and factors used for offsite
partial wave overlap integrals.
"""
import numpy as np
from sympy import N
from sympy.physics.wigner import gaunt, wigner_3j

# Tables indexed [l1][l2][(l3 - |l1-l2|)/2][l1 + m1][m2]; l1, l2 <= 3
# (up to f electrons), so m1 spans at most 7 values and m2 at most 4.
gcs = np.zeros([4, 4, 4, 7, 4])
facs = np.zeros([4, 4, 4, 7, 4])
print(gaunt(1, 0, 1, 1, 0, 0))
print(N(gaunt(1, 0, 1, 1, 0, -1)))
print(type(N(gaunt(1, 0, 1, 1, 0, -1))))
for l1 in range(4):
    for l2 in range(l1 + 1):
        # Triangle rule: |l1 - l2| <= l3 <= l1 + l2, stepping by 2 (parity).
        for l3 in range(abs(l1 - l2), l1 + l2 + 1, 2):
            for m1 in range(-l1, l1 + 1):
                for m2 in range(0, l2 + 1):
                    # Gaunt coefficient (integral of three spherical
                    # harmonics), evaluated numerically by sympy.
                    val = N(gaunt(l1, l2, l3, m1, m2, -m1 - m2))
                    gcs[l1][l2][(l3 - abs(l1 - l2)) // 2][l1 + m1][m2] = val
                    # Product of Wigner 3j symbols with the standard
                    # normalization prefactor, used for offsite partial
                    # wave overlap integrals.
                    val2 = N(wigner_3j(l1, l2, l3, 0, 0, 0)) * N(
                        wigner_3j(l1, l2, l3, -m1, m2, m1 - m2)
                    )
                    val3 = np.sqrt(
                        (2 * l1 + 1) * (2 * l2 + 1) * (2 * l3 + 1) / 4 / np.pi
                    )
                    facs[l1][l2][(l3 - abs(l1 - l2)) // 2][l1 + m1][m2] = val2 * val3
                    print(val, val2 * val3)

# Context managers (rather than open/close pairs) guarantee the generated
# files are flushed and closed even if a write fails part-way.
with open("gaunt.c", "w") as f:
    # NOTE(review): gaunt.c includes quadrature.h rather than gaunt.h (which
    # declares these arrays extern below) — confirm this is intentional.
    f.write('#include "quadrature.h"\n\n')
    f.write("double GAUNT_COEFF[%d][%d][%d][%d][%d] = " % (4, 4, 4, 7, 4))
    f.write(
        (str(gcs.tolist()).replace("[", "{").replace("]", "}").replace("}, ", "},\n"))
        + ";\n\n"
    )
    f.write("double SBTFACS[%d][%d][%d][%d][%d] = " % (4, 4, 4, 7, 4))
    f.write(
        (str(facs.tolist()).replace("[", "{").replace("]", "}").replace("}, ", "},\n"))
        + ";\n\n"
    )

with open("gaunt.h", "w") as f:
    # Header with extern declarations, wrapped in an include guard.
    f.write("#ifndef GAUNT_H\n#define GAUNT_H\n\n")
    f.write("extern double GAUNT_COEFF[%d][%d][%d][%d][%d];\n" % (4, 4, 4, 7, 4))
    f.write("extern double SBTFACS[%d][%d][%d][%d][%d];\n" % (4, 4, 4, 7, 4))
    f.write("\n#endif\n")
|
kylebystrom/pawpyseed | pawpyseed/core/utils.py | <reponame>kylebystrom/pawpyseed
## @package pawpyseed.core.utils
# Miscellaneous utilities file for the Python portion of the code..
class PAWpyError(Exception):
    """
    Class for handling errors that occur during execution
    of Python functions in pawpyseed
    """

    def __init__(self, msg):
        # Pass the message to Exception so str(e) and tracebacks show it
        # (the original only stored it on self.msg, leaving str(e) empty).
        super().__init__(msg)
        self.msg = msg
class PAWpyWarning(Warning):
    """
    Class for handling warnings that occur during execution
    of Python functions in pawpyseed
    """

    def __init__(self, msg):
        # Pass the message to Warning so str(w) shows it; also fixes the
        # docstring typo ("taht") from the original.
        super().__init__(msg)
        self.msg = msg
def check_spin(spin, nspin):
    """
    Validate the `spin` input parameter to single_band_projection and
    similar functions against the nspin of the wavefunction object being
    analyzed.

    Returns the number of spin channels to process: 1 when a single valid
    spin index (0 <= spin < nspin) is selected, otherwise nspin (a negative
    spin means "all spins"). Raises PAWpyError when spin is out of range.
    """
    # Negative spin selects every spin channel.
    if spin < 0:
        return nspin
    # A specific, in-range spin index means one channel is processed.
    if spin < nspin:
        return 1
    raise PAWpyError(
        "spin must be less than nspin. spin is %d, nspin is %d" % (spin, nspin)
    )
def el(site):
    """
    Return the chemical element symbol (e.g. "Fe") of a
    pymatgen site object.
    """
    specie = site.specie
    return specie.symbol
|
kylebystrom/pawpyseed | setup.py | import codecs
import configparser
import os
import sys
import numpy as np
from setuptools import Extension, setup
class PawpyBuildError(Exception):
    """Raised when the build environment lacks a requirement (e.g. MKL)."""

    pass


try:
    from Cython.Build import cythonize
except ImportError:
    raise ImportError(
        "Need Cython>=0.29.21 to build pawpyseed. Please run 'pip install cython'"
    )

with codecs.open("README.md", "r", encoding="utf8") as fh:
    long_description = fh.read()

# When True, build with debug symbols and include the test helper
# extension and test packages.
DEBUG = True

# C source files (basenames, without extension) compiled into pawpyc.
srcfiles = [
    "density",
    "gaunt",
    "linalg",
    "projector",
    "pseudoprojector",
    "quadrature",
    "radial",
    "reader",
    "sbt",
    "utils",
    "momentum",
]

# SEARCH FOR AND READ CONFIGURATION FILE
# Priority: ./site.cfg, then ~/.pawpyseed-site.cfg, then the bundled default.
config = configparser.ConfigParser()
user_cfg_file = os.path.expanduser("~/.pawpyseed-site.cfg")
if os.path.isfile("site.cfg"):
    config.read("site.cfg")
elif os.path.isfile(user_cfg_file):
    config.read(user_cfg_file)
else:
    config.read_file(open("site.cfg.default"))

# SET COMPILER AND LINKER IF SET IN CONFIG FILE
if "compiler_name" in config["compiler"]:
    os.environ["CC"] = config["compiler"]["compiler_name"]
    # Default the shared-object linker to "<compiler> -shared" unless an
    # explicit linker is configured.
    if not "linker_name" in config["compiler"]:
        os.environ["LDSHARED"] = config["compiler"]["compiler_name"] + " -shared"
if "linker_name" in config["compiler"]:
    os.environ["LDSHARED"] = config["compiler"]["linker_name"]

# SET PARALLELIZATION AND INTERFACE OPTIONS
sdl = config["mkl"].getboolean("sdl")  # link the MKL single dynamic library
omp_loops = config["threading"].getboolean("omp_loops")  # OpenMP in C loops
threaded_mkl = config["threading"].getboolean("threaded_mkl")  # threaded MKL

libs = []
if sys.platform == "darwin":
    # platform_link_args = ['-lmkl_avx512']
    link_args = []
    # macOS: point the compiler explicitly at the SDK headers.
    os.environ[
        "CPATH"
    ] = "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include"
else:
    link_args = ["-Wl,--no-as-needed"]
if sdl:
    libs.extend(["mkl_rt", "iomp5"])
else:
    # MKL layered linking model: interface + threading + core libraries.
    interfacelib = "mkl_intel_lp64"
    if threaded_mkl:
        threadlib = "mkl_intel_thread"
        omplib = "iomp5"
    else:
        threadlib = "mkl_sequential"
        omplib = None
    libs.extend([interfacelib, threadlib, "mkl_core"])
    if omplib is not None:
        libs.append(omplib)
libs.extend(["pthread", "m", "dl"])

# SET OTHER COMPILER ARGS
extra_args = "-std=c11 -fPIC -Wall".split()
if omp_loops:
    extra_args.append("-fopenmp")

# ADD ADDITIONAL MKL LIBRARIES IF FOUND IN CONFIG
lib_dirs = []
include_dirs = ["pawpyseed/core", np.get_include()]
if "root" in config["mkl"]:
    root_dirs = config["mkl"]["root"].split(":")
    for r in root_dirs:
        lib_dirs.append(os.path.join(r, "lib/intel64"))
        lib_dirs.append(os.path.join(r, "lib"))
        include_dirs.append(os.path.join(r, "include"))
if "extra_libs" in config["compiler"]:
    extra_dirs = config["compiler"]["extra_libs"].split(":")
    for d in extra_dirs:
        lib_dirs.append(d)

# MKL complex types are mapped onto C99 complex doubles/floats.
macros = [
    ("MKL_Complex16", "double complex"),
    ("MKL_Complex8", "float complex"),
]
cython_macros = {}  # Cython macros in .pyx files
comp_direct = {  # compiler_directives
    "language_level": 3,  # use python 3
    "embedsignature": True,  # write function signature in doc-strings
}

# using path search of tenpy: https://github.com/tenpy/tenpy/blob/main/setup.py
# MKL is searched for in (1) MKL environment variables, (2) the active
# conda environment, (3) the Python base prefix.
HAVE_MKL = 0
MKL_DIR = os.getenv("MKL_DIR", os.getenv("MKLROOT", os.getenv("MKL_HOME", "")))
if MKL_DIR:
    include_dirs.append(os.path.join(MKL_DIR, "include"))
    lib_dirs.append(os.path.join(MKL_DIR, "lib", "intel64"))
    HAVE_MKL = 1
CONDA_PREFIX = os.getenv("CONDA_PREFIX")
if CONDA_PREFIX:
    include_dirs.append(os.path.join(CONDA_PREFIX, "include"))
    lib_dirs.append(os.path.join(CONDA_PREFIX, "lib"))
    if not HAVE_MKL:
        # check whether mkl-devel is installed
        HAVE_MKL = int(os.path.exists(os.path.join(CONDA_PREFIX, "include", "mkl.h")))
PYTHON_BASE = sys.base_prefix
if PYTHON_BASE != CONDA_PREFIX:
    include_dirs.append(os.path.join(PYTHON_BASE, "include"))
    lib_dirs.append(os.path.join(PYTHON_BASE, "lib"))
    if not HAVE_MKL:
        # check whether mkl-devel is installed
        HAVE_MKL = int(os.path.exists(os.path.join(PYTHON_BASE, "include", "mkl.h")))
# Allow an explicit HAVE_MKL override from the environment.
HAVE_MKL = int(os.getenv("HAVE_MKL", HAVE_MKL))
cython_macros["HAVE_MKL"] = HAVE_MKL
if HAVE_MKL:
    if os.getenv("MKL_INTERFACE_LAYER", "LP64").startswith("ILP64"):
        # 64-bit-integer MKL interface requires the MKL_ILP64 macro.
        macros.append(("MKL_ILP64", None))
        cython_macros["MKL_INTERFACE_LAYER"] = 1
    else:
        cython_macros["MKL_INTERFACE_LAYER"] = 0
else:
    raise PawpyBuildError(
        "Must have MKL installed to build pawpyseed. Please run 'pip install mkl-devel'"
    )

# SET UP COMPILE/LINK ARGS AND THREADING
cfiles = [f + ".c" for f in srcfiles]
ext_files = cfiles
ext_files = ["pawpyseed/core/" + f for f in ext_files]
if DEBUG:
    include_dirs.append("pawpyseed/core/tests")
rt_lib_dirs = lib_dirs[:]
if not DEBUG:
    extra_args += ["-g0", "-O2"]
else:
    extra_args += ["-g"]

extensions = [
    Extension(
        "pawpyseed.core.pawpyc",
        ext_files + ["pawpyseed/core/pawpyc.pyx"],
        define_macros=macros,
        libraries=libs,
        library_dirs=lib_dirs,
        extra_link_args=extra_args + link_args,
        extra_compile_args=extra_args,
        runtime_library_dirs=rt_lib_dirs,
        include_dirs=include_dirs,
    )
]
if DEBUG:
    # Additional extension exposing the C test helpers.
    extensions.append(
        Extension(
            "pawpyseed.core.tests.testc",
            ["pawpyseed/core/tests/testc.pyx", "pawpyseed/core/tests/tests.c"]
            + ext_files,
            define_macros=macros,
            libraries=libs,
            library_dirs=lib_dirs,
            extra_link_args=extra_args + link_args,
            extra_compile_args=extra_args,
            runtime_library_dirs=rt_lib_dirs,
            include_dirs=include_dirs,
        )
    )

packages = ["pawpyseed", "pawpyseed.core", "pawpyseed.analysis"]
if DEBUG:
    packages.append("pawpyseed.core.tests")

setup(
    name="pawpyseed",
    version="0.7.1",
    description="Parallel C/Python package for numerical analysis of PAW DFT wavefunctions",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="<NAME>",
    author_email="<EMAIL>",
    maintainer="<NAME>",
    maintainer_email="<EMAIL>",
    license="BSD",
    setup_requires=["mkl-devel", "numpy>1.14", "Cython>=0.29.21"],
    install_requires=[
        "mkl-devel",
        "numpy>=1.14",
        "scipy>=1.0",
        "pymatgen>=2018.2.13",
        "sympy>=1.1.1",
        "matplotlib>=0.2.5",
    ],
    python_requires=">=3.6",
    packages=packages,
    data_files=[("", ["LICENSE", "README.md"])],
    url="https://github.com/kylebystrom/pawpyseed",
    classifiers=(
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
    ),
    ext_modules=cythonize(
        extensions,
        include_path=[os.path.join(os.path.abspath(__file__), "pawpyseed/core")],
        compile_time_env=cython_macros,
        compiler_directives=comp_direct,
    ),
    zip_safe=False,
)
|
kylebystrom/pawpyseed | pawpyseed/core/quad_check.py | import numpy as np
# Count the total number of angular quadrature grid points over 100 radial
# steps, mirroring the grid construction used by the C quadrature code.
total = 0
for radial_step in range(100):
    # The polar (theta) resolution grows with the radial index, capped at 29.
    num_theta = min(int(radial_step * 27 / 100), 26) + 3
    print(num_theta)
    nodes, _weights = np.polynomial.legendre.leggauss(num_theta)
    for theta_step in range(num_theta):
        # Azimuthal (phi) resolution shrinks toward the poles (|cos| -> 1).
        num_phi = int((num_theta * 2 + 2) * (1 - nodes[theta_step] ** 2) ** 0.5)
        total += num_phi
print()
print(total)
|
kylebystrom/pawpyseed | pawpyseed/analysis/run_pawpy.py | <reponame>kylebystrom/pawpyseed
import os
import shutil
from shutil import rmtree
from monty.shutil import decompress_dir
from pymatgen.io.vasp import Vasprun
from pawpyseed.core.projector import Projector
from pawpyseed.core.wavefunction import Wavefunction
class PathHolder:
    """Minimal stand-in for a FireWorks Launch: only records its directory."""

    def __init__(self, path):
        # path: launch directory of the (fake) launch.
        self.launch_dir = path


class DummyFirework:
    """
    Minimal stand-in for a FireWorks Firework, exposing only the attributes
    (launches, name, fw_id) read by DefectWorkflowWavefunctionHandle.
    """

    def __init__(self, path):
        # A single fake launch pointing at `path`; name and fw_id reuse the
        # path as a unique identifier.
        self.launches = [PathHolder(path)]
        self.name = path
        self.fw_id = path
class DefectWorkflowWavefunctionHandle:
"""
This class is made to run Kyle's
wavefunction parsing code...
bulk_fw_sets is a dict that has bulk fireworks with supercell size as key
wf_job_run is a string which says if it is "normal" (=gga) or "scan" or "hse"
"""
def __init__(self, bulk_fw_sets, dwo=None):
self.bulk_fw_sets = bulk_fw_sets
self.dwo = dwo
def _setup_file_for_parsing(self, path):
# make a "kyle_file" for parsing out relevant data
# returns True if file setup correctly
files_to_copy = ["CONTCAR", "OUTCAR", "POTCAR", "WAVECAR", "vasprun.xml"]
kyle_path = os.path.join(path, "kyle_file")
if os.path.exists(kyle_path):
print("KYLE FILE ALREADY! Removing and rebuilding...")
rmtree(kyle_path)
# make kyle file
os.makedirs(kyle_path)
# copy relevant files to kyle path
for file in files_to_copy:
filepat = os.path.join(path, file + ".relax2.gz")
if not os.path.exists(filepat):
filepat = os.path.join(path, file + ".relax1.gz")
if not os.path.exists(filepat):
filepat = os.path.join(path, file + ".gz")
if not os.path.exists(filepat):
filepat = os.path.join(path, file)
if not os.path.exists(filepat):
print(f"Could not find {file}! Skipping this defect...")
return False
# COPY file to kyle_file
if ".gz" in filepat:
shutil.copy(filepat, os.path.join(kyle_path, file + ".gz"))
else:
shutil.copy(filepat, os.path.join(kyle_path, file))
decompress_dir(kyle_path)
return True
def _get_vbm_band_dict(self, path, vbm):
# ASSUMING KYLE PATH ALREADY SET UP, RETURNS A BAND_DICT to be used by run_pawpy
# (band numbers are keys, contains maxmin window of band energies (along with occupation),
# and stores percentage of band character after pawpy is run...)
vr = Vasprun(os.path.join(path, "kyle_file", "vasprun.xml"))
max_num = 0
band_dict = {
bandindex: {
"max_eigen": [-10000.0, 0.0],
"min_eigen": [10000.0, 0.0],
"VB_projection": None,
"CB_projection": None,
}
for bandindex in range(len(list(vr.eigenvalues.values())[0][0]))
}
for spin, spinset in vr.eigenvalues.items():
for kptset in spinset:
for bandnum, eigenset in enumerate(kptset):
if eigenset[1] and (eigenset[0] <= vbm) and (bandnum > max_num):
max_num = bandnum
# see if this is lowest eigenvalue for this band so far
if eigenset[0] < band_dict[bandnum]["min_eigen"][0]:
band_dict[bandnum]["min_eigen"] = eigenset[:]
# see if this is highest eigenvalue for this band so far
if eigenset[0] > band_dict[bandnum]["max_eigen"][0]:
band_dict[bandnum]["max_eigen"] = eigenset[:]
trim_band_dict = {
band_index: band_dict[band_index].copy()
for band_index in range(max_num - 20, max_num + 21)
}
return trim_band_dict, vr.is_spin
    def run_pawpy(self):
        """
        Container for running pawpyseed on all defects in this workflow.

        Steps: (1) collect bulk and defect launch directories and take the
        VBM from the first bulk vasprun; (2) build projector bases from the
        bulk runs; (3) for each defect firework, prepare its kyle_file
        directory, build the band bookkeeping dict around the VBM, project
        each band onto the bulk valence/conduction manifolds, and store the
        result keyed by firework id. A failure on one defect is printed and
        skipped so the remaining defects still run.

        :return: dict mapping firework id -> band_dict with projections.
        """
        vbm = None
        bulk_dirs, wf_dirs = [], []
        bulk_sizes, wf_sizes = [], []
        for sc_size, bulk_fw in self.bulk_fw_sets.items():
            launch_dir = bulk_fw.launches[-1].launch_dir
            bulk_sizes.append(sc_size)
            bulk_dirs.append(launch_dir)
            if not vbm:
                # NOTE(review): `not vbm` would also re-trigger if the VBM
                # were exactly 0.0; `vbm is None` is probably intended.
                # need to check different filenames
                vr = Vasprun(os.path.join(launch_dir, "vasprun.xml"))
                vbm = vr.eigenvalue_band_properties[2]
                print("\twill use vbm value of ", vbm)
        for sc_size, size_set in self.dwo.defect_fw_sets.items():
            for fw in size_set:
                wf_sizes.append(sc_size)
                wf_dirs.append(fw.launches[-1].launch_dir)
        # Build the shared projector bases from the bulk calculations.
        projector_list, bases = Projector.setup_bases(bulk_dirs, wf_dirs, True)
        num_proj_els = bases[0].num_proj_els
        store_all_data = {}
        basis_sets = {}
        for i, sc_size in enumerate(bulk_sizes):
            basis_sets[sc_size] = bases[i]
        # for each defect, in a given supercell size
        for sc_size, size_set in self.dwo.defect_fw_sets.items():
            for fw in size_set:
                try:
                    launch_dir = fw.launches[-1].launch_dir
                    print(f"start parsing of {fw.name} wavefunction")
                    # setup file path,
                    print("\tsetting up files")
                    self._setup_file_for_parsing(launch_dir)
                    # find band number which is at VBM and initialize band_dict and find spin polarization
                    print("\tinitializing band_dict")
                    band_dict, spinpol = self._get_vbm_band_dict(launch_dir, vbm)
                    # setup defect wavefunction
                    print("\tmerging wf from dir")
                    wf = Wavefunction.from_atomate_directory(
                        launch_dir, setup_projectors=False
                    )
                    # loop over band projections around band edge and store results
                    print("\tperforming projections")
                    pr = Projector(
                        wf,
                        basis_sets[sc_size],
                        projector_list=projector_list,
                        unsym_wf=True,
                    )
                    for bandnum in band_dict.keys():
                        v, c = pr.proportion_conduction(bandnum, spinpol=spinpol)
                        band_dict[bandnum]["VB_projection"] = v[:]
                        band_dict[bandnum]["CB_projection"] = c[:]
                        print(bandnum, band_dict[bandnum])
                    # then tear down file path
                    print("\ttear down files")
                    rmtree(os.path.join(launch_dir, "kyle_file"))
                    # release defect memory
                    pr.wf.free_all()
                    store_all_data[fw.fw_id] = band_dict
                except Exception as e:
                    # Best-effort: report and move on to the next defect.
                    print(
                        "___&*$#&(*#@&$)(*&@#)($----\n--> ERROR OCCURED. "
                        "Skipping this defect.\n-------------^#$^*&^#$&*^#@^$#-------------"
                    )
                    print(repr(e))
        # now release all bulk basis memory
        for bulk_basis in basis_sets.values():
            bulk_basis.free_all()
        Projector.free_projector_list(projector_list, num_proj_els)
        return store_all_data
|
Nuklear-s-Team/fibonacciThing | Fibonnacci GUI VERSION.py | <reponame>Nuklear-s-Team/fibonacciThing
# Copyright 2020 <NAME> and <NAME>
# Improved by Luzgog
# Made with the same translate function as the original.
import tkinter as tkinter
from tkinter import messagebox
from tkinter import scrolledtext
from Fibonacci_Encoder import encode, decode, encodeReversed, decodeReversed, randomGen, encodeRandom, generatefromkey, decodeRandom
root = tkinter.Tk(className="Fibonacci Encoder")
root.resizable(0, 0)  # fixed-size main window
# Secondary windows are created up front and kept hidden until needed.
help = tkinter.Toplevel(width=90, height=90)  # NOTE(review): shadows builtins.help
help.withdraw()
translations = tkinter.Toplevel(width=90, height=90)
translations.withdraw()
# Tk variables backing the radio buttons, checkbox and key handling.
lol = tkinter.StringVar()   # selected task: "Encode" / "Decode"
mode = tkinter.StringVar()  # selected keyset: "Regular" / "Reversed" / "Random"
lastTranslation = tkinter.StringVar()
remTrue = tkinter.IntVar()  # "Remember my key" checkbox state (0/1)
key = tkinter.StringVar()   # most recently generated random key
lastTranslation.set("Last Tranlsation: None")
currentTranslation = ""     # last translation, formatted for saving
#root.iconphoto(False, tkinter.PhotoImage(file="icon.png"))
# Accepted spellings for each task in the mode radio buttons.
decodeNames = ["de", "decode", "De", "Decode", "d", "D"]
encodeNames = ["en", "encode", "En", "Encode", "e", "E"]
def showHelp():
    # Reveal the prebuilt (hidden) help window.
    help.deiconify()
def closeHelp():
    # Hide the help window instead of destroying it so it can be reopened.
    help.withdraw()
def error():
    # Generic "bad input" dialog used by translate().
    messagebox.showerror("Error", "Invalid Input")
def keyMissingError():
    # Shown when decoding in Random mode without a key.
    messagebox.showerror("Error", "No Key")
def translate():
    """Translate the contents of the input entry in place.

    Reads the task (Encode/Decode), keyset (Regular/Reversed/Random) and
    optional key from the GUI, replaces the entry text with the result,
    and refreshes the "last translation" and key displays.

    Fix: the original fell through after showing an error dialog (or when
    no task/keyset was selected) and then crashed with a NameError on the
    undefined translation result. Every failure path now returns early.
    """
    global currentTranslation
    task = lol.get()
    message = T.get()
    version = mode.get()
    getKey = RandomEncode.get()
    T.delete(0, tkinter.END)
    totranslate = None
    if task in encodeNames:
        if version == "Regular":
            try:
                totranslate = encode(message)
            except BaseException:
                error()
                return
        elif version == "Reversed":
            try:
                totranslate = encodeReversed(message)
            except BaseException:
                error()
                return
        elif version == "Random":
            try:
                if getKey == "":
                    # No key supplied: generate a fresh random dictionary
                    # and remember its key so it can be shown to the user.
                    randomDict, key2 = randomGen()
                    key.set(key2)
                    totranslate = encodeRandom(message, randomDict)
                else:
                    totranslate = encodeRandom(message, generatefromkey(getKey))
            except BaseException:
                error()
                return
    elif task in decodeNames:
        if version == "Regular":
            try:
                totranslate = decode(message)
            except BaseException:
                error()
                return
        elif version == "Reversed":
            try:
                totranslate = decodeReversed(message)
            except BaseException:
                error()
                return
        elif version == "Random":
            # Decoding a random keyset is impossible without the key.
            if getKey == "":
                keyMissingError()
                return
            try:
                totranslate = decodeRandom(message, generatefromkey(getKey))
            except BaseException:
                error()
                return
    if totranslate is None:
        # No task or no keyset selected.
        error()
        return
    T.insert(tkinter.END, totranslate)
    # The read-only displays must be enabled before they can be edited.
    link.config(state=tkinter.NORMAL)
    keyDisplay.config(state=tkinter.NORMAL)
    link.delete(1.0, tkinter.END)
    keyDisplay.delete(1.0, tkinter.END)
    mode2 = mode.get()
    link.insert(1.0, "Last Translation: (" + mode2 + " Mode) " + message + " <-> " + totranslate)
    currentTranslation = "(" + mode2 + " Mode) " + message + " <-> " + totranslate
    keyDisplay.insert(1.0, str(key.get()))
    key.set("")
    link.config(state=tkinter.DISABLED)
    keyDisplay.config(state=tkinter.DISABLED)
def updateSaves():
    # Reload the saved-translations viewer from disk. The widget is kept
    # read-only, so it is enabled only for the duration of the refresh.
    saves.config(state=tkinter.NORMAL)
    saves.delete(1.0, tkinter.END)
    with open("savedTranslations.txt", "r") as file:
        textIn = file.read()
    saves.insert(1.0, textIn)
    saves.config(state=tkinter.DISABLED)
# insert text
def saveTranslation():
    """Append the most recent translation to the saved-translations file
    and refresh the viewer window."""
    with open("savedTranslations.txt", 'a') as out:
        out.write("\n\n" + currentTranslation)
    updateSaves()
# save file
def clearSaves():
    """Wipe the saved-translations file and refresh the viewer."""
    # Opening in "w" mode truncates the file; nothing needs to be written.
    with open("savedTranslations.txt", "w"):
        pass
    updateSaves()
def openTrans():
    # Reveal the prebuilt saved-translations window.
    translations.deiconify()
def closeTrans():
    # Hide (not destroy) the saved-translations window.
    translations.withdraw()
def copyKey():
    # Put the currently displayed key on the system clipboard.
    root.clipboard_clear()
    root.clipboard_append(keyDisplay.get(1.0, tkinter.END))
def remember():
    """Persist the key from the entry when the checkbox is ticked,
    otherwise blank the saved-key file."""
    saved = RandomEncode.get() if remTrue.get() else ""
    with open("savedKey.txt", "w") as out:
        out.write(saved)
def initialize():
    """Populate the GUI with persisted state at startup.

    Refreshes the saved-translations viewer and, if a previously
    remembered key exists, pre-fills the key entry with it.

    Fix: the original crashed with FileNotFoundError when savedKey.txt
    did not exist; a missing file is now treated as "no saved key".
    """
    saveTranslation()
    try:
        with open("savedKey.txt", "r") as file:
            key3 = file.read()
    except FileNotFoundError:
        key3 = ""
    if key3 == "":
        pass
    else:
        RandomEncode.insert(1, key3)
# Main window layout. NOTE: .grid() returns None, so the names bound below
# hold None, not the widgets; only widgets referenced later (RandomEncode,
# T, link, keyDisplay) are kept as real references.
Encode = tkinter.Radiobutton(root, text='Encode', variable=lol, value="Encode", indicatoron=0, width=42, selectcolor="light green").grid(row=1, column=0, sticky=tkinter.W)
Decode = tkinter.Radiobutton(root, text='Decode', variable=lol, value="Decode", indicatoron=0, width=42, selectcolor="light green").grid(row=1, column=1, sticky=tkinter.E)
Regular = tkinter.Radiobutton(root, text='Regular', variable=mode, value="Regular", indicatoron=0, width=42, selectcolor="cyan").grid(row=4, column=0, sticky=tkinter.W)
Reversed = tkinter.Radiobutton(root, text='Reversed', variable=mode, value="Reversed", indicatoron=0, width=42, selectcolor="cyan").grid(row=4, column=1, sticky=tkinter.E)
Random = tkinter.Radiobutton(root, text="Use Random Dictionary", variable=mode, value="Random", indicatoron=0, width=42, selectcolor="gold").grid(row=5, column=0, sticky=tkinter.N)
RandomEncode = tkinter.Entry(root, width=48)  # key entry for Random mode
RandomEncode.grid(row=5, column=1)
# Disabled Scale widgets are (ab)used as labelled horizontal separators.
J = tkinter.Scale(root, state=tkinter.DISABLED, length=600, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="--------------------------------------------------------Mode--------------------------------------------------------")
J.set(100)
J.grid(row=0, sticky=tkinter.N, columnspan=2)
R = tkinter.Scale(root, state=tkinter.DISABLED, length=500, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="--------------------------------------------Key Sets--------------------------------------------")
R.set(100)
R.grid(row=3, sticky=tkinter.N, columnspan=2)
T = tkinter.Entry(root, width=100)  # main input/output entry
L = tkinter.Scale(root, state=tkinter.DISABLED, length=500, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="------------------------------------------Input Below------------------------------------------")
L.set(100)
L.grid(row=6, sticky=tkinter.N, columnspan=2)
T.grid(row=7, column=0, sticky=tkinter.W, columnspan=2)
X = tkinter.Scale(root, state=tkinter.DISABLED, length=500, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200)
X.set(100)
X.grid(row=8, sticky=tkinter.N, columnspan=2)
Translate = tkinter.Button(root, text="Translate", command=translate, width=42, activebackground="light green").grid(row=9, column=0, sticky=tkinter.W)
Quit = tkinter.Button(root, text="Quit", command=root.quit, width=85, activebackground="red").grid(row=10, columnspan=2, sticky=tkinter.E)
T.insert(tkinter.END, "")
Help = tkinter.Button(root, text="Help", command=showHelp, width=42, activebackground="blue").grid(row=9, column=1)
X = tkinter.Scale(root, state=tkinter.DISABLED, length=600, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200)
X.set(100)
X.grid(row=11, sticky=tkinter.N, columnspan=2)
link = scrolledtext.ScrolledText(root, width=65, height=5, wrap="word", font="consolas", state=tkinter.DISABLED)
link.grid(row=12, sticky=tkinter.W, columnspan=2)
uti = tkinter.Scale(root, state=tkinter.DISABLED, length=600, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="-------------------------------------------------------Utilities-------------------------------------------------------")
uti.set(100)
uti.grid(row=13, sticky=tkinter.N, columnspan=2)
save = tkinter.Button(root, text="Save this translation", command=saveTranslation, width=42, activebackground="light green").grid(row=14, column=0, sticky=tkinter.N)
copy = tkinter.Button(root, text="Copy this key", command=copyKey, width=42, activebackground="light green").grid(row=14, column=1, sticky=tkinter.N)
# Fix: "activebackgroun" and "var" were not valid Tk option names and made
# widget construction raise TclError at startup; corrected to
# "activebackground" and "variable".
openTranslations = tkinter.Button(root, text="Open Saved Translations", command=openTrans, width=42, activebackground="light green").grid(row=15, column=0, sticky=tkinter.N)
rememberKey = tkinter.Checkbutton(root, text="Remember my key", variable=remTrue, command=remember).grid(row=15, column=1, sticky=tkinter.N)
keyDisplay = scrolledtext.ScrolledText(root, height=0.5, width=73, wrap="word", state=tkinter.DISABLED)
keyDisplay.grid(row=16, sticky=tkinter.W, columnspan=2)
info = tkinter.Label(root, text="Created by <NAME> Luzgog. Github link: https://github.com/PG-Development/Fibonacci-Encoder")
info.grid(row=17, sticky=tkinter.E, columnspan=2)
# Help window: a prebuilt Toplevel holding a read-only usage guide.
title = tkinter.Label(help, text="Help Menu").grid(row=0, sticky=tkinter.N)
helpText = scrolledtext.ScrolledText(help, width=65, height=20, wrap="word")
helpText.grid(row=1, sticky=tkinter.N)
# The guide text (implicit string concatenation, one paragraph per chunk).
helpText.insert(1.0, "Thanks for downloading this encoder! My team and I have worked hard on it. \n \n"
                     "Important: When you want to close this, do NOT press the x at the top right. Press the exit help menu button at the bottom.\n \n"
                     "When you download this repository, you should have gotten 2 .py files: the Fibonacci_Encoder, and the Fibonacci GUI Version file. "
                     "Make sure they are in the same folder. The GUI Version is the much more convenient version of this program, but it uses the same functions. "
                     "When you open up the GUI file, you should see a small window pop up on your screen. This is the main application window, built with tkinter. "
                     "You select your mode at the top, choosing from either Encode, Decode, or Decode from Random. To encode from a random, select Encode from random in the keysets. "
                     "Below those, you should see keysets. You have regular, reversed, and encode from random. Next to both of the random choices you see inputs. "
                     "Below the modes and keysets you see an input, to place your text inside, and this is where the message will come out. \n \n"
                     "Regular Mode\n"
                     "Regular mode is the base mode of this encoder. It uses a set dictionary of keys and items, to encode your message. "
                     "To use this mode, simply choose the \"Encode\" mode and the \"Regular\" Keyset. Once you press translate, your message will be replaced "
                     "by the encoded version. Below your output, there is a last translation box which shows you the original. "
                     "To decode, just switch your mode to decode, and input the code you received from your friend into the box. "
                     "It should change before your very eyes into comprehensible text.\n\n"
                     "Reversed Mode\n"
                     "Reversed mode is a separate, different keyset then the regular mode. It takes the code for a letter, and switches it around to the opposite letter. "
                     "For example, the code for a is now the code for z, and the code for b is now the code for y. This means the code for z is now the code for a, and so on. "
                     "To use this keyset, choose which mode you want, and then instead of selecting the \"Regular\" keyset, choose the \"Reversed\" keyset.\n\n"
                     "Random Mode\n"
                     "Random Mode scrambles the codes for the letters to random locations. The total number of possible dictionaries is 403,291,461,126,605,635,584,000,000, aka 403 septillion. "
                     "That's a lot of possible combinations! And every time you use it, it generates a random choice. Now, that's cool, but say you want to retrieve an already generated "
                     "dictionary. That's easy! You see, whenever you generate a new dictionary, a key will appear in the lower text box. Just press the \"Copy this key\" buttton "
                     "to copy the key.\n\n"
                     "To encode using this mode, you first select the \"Encode\" Button. Then select the \"Use Random Dictionary\" choice. If you already have a key, put it in the "
                     "entry box next to the button. If you do not have a key, simply leave the box blank. Then press translate. You should get a result and a key. If you want to now encode "
                     "more messages using the same key, just copy the key and put it in the box. When you send messages to someone else, send them the key privately, so then you can"
                     " send them the message in public and other people will get gibberish.\n\n"
                     "To decode using this mode, you must have a key, or else you will get an error. Put the key in the box next to \"Use Random Dictionary\". Then select \"Decode\" "
                     "and \"Use Random Dictionary\". Finally, put in the message in the lower entry box. When you press Translate, you should get a good message.\n\n"
                     "Utilities\n"
                     "There are 3 utilities buttons: the \"Save this translation\" button, the \"Copy this key\" button, and the \"Open Saved Translations\" button. "
                     "These are here to help you use the app more efficiently.\n\n"
                     "The \"Save This Translations\" button takes the translation you just did and puts it into another text window that you can open. This text resets everytime you "
                     "close the app, so keep the app open go save your translations. This feature will be improved in the future to save the translation to a text file. To open this text "
                     "window, just press the \"Open Saved Translations\" button. The \"Copy This Key\" button just copies the key if you have one.\n\n"
                     "A new feature is the \"Remember my key\" feature, which can save your key for another time. Whenever you want to save your key, just check it. To update your "
                     "key, you must uncheck it and then recheck it to make changes to the .txt file. If you want to use this feature, you must download the savedKey.txt file. To "
                     "reset your key, you can just uncheck it again.")
# Highlight selected headings in the guide (indices are "line.column").
helpText.tag_add("important", "3.0", "3.9")
helpText.tag_add("regularTag", "7.0", "7.12", "10.0", "10.13", "13.0", "13.12", "20.0", "20.9")
helpText.tag_config("important", foreground="red", font=("Consolas", 13, "bold", "italic"))
helpText.tag_config("regularTag", foreground="blue", font=("Consolas", 12, "bold", "italic"))
helpText.config(state=tkinter.DISABLED)  # make the guide read-only
closeHelpButton = tkinter.Button(help, text="Exit Help Menu", command=closeHelp, activebackground="red").grid(row=2, sticky=tkinter.N)
# Saved-translations window: prebuilt Toplevel, shown/hidden on demand.
titleTrans = tkinter.Label(translations, text="Saved Translations").grid(row=0, sticky=tkinter.N)
saves = scrolledtext.ScrolledText(translations, width=65, height=20, wrap="word")
saves.grid(row=1, sticky=tkinter.N)
saves.config(state=tkinter.DISABLED)  # read-only; refreshed by updateSaves()
closeTransButton = tkinter.Button(translations, text="Exit Saved Translations", command=closeTrans, activebackground="red", width=38).grid(row=2, sticky=tkinter.W)
clearSavesButton = tkinter.Button(translations, text="Clear Saves", command=clearSaves, activebackground="red", width=38).grid(row=2, sticky=tkinter.E)
# Load persisted state, then enter the Tk event loop.
initialize()
tkinter.mainloop()
Nuklear-s-Team/fibonacciThing | Fibonacci_Encoder.py | # Copyright 2019 by <NAME> and <NAME>
# Improved by Luzgog
# How to use:
# Run the program.
# It will ask you to decode or encode.
# Type de for decode, and en for encode.
# Then type your message. While encoding,
# you can use most punctuation except an
# apostrophe. You can use commas and other
# punctuation. While decoding, you just paste
# the message you got in the prompt.
# When you try to encode an apostrophe, it works as expected.
# However the decoder will not work if there is one.
# This is being fixed soon.
# Please credit me as the original author if you are making something using this.
# Thanks! :) ;)
import random
keys = {" ": "|", "a": "2.", "b": "3.", "c": "5.", "d": "8.", "e": "13.", "f": "21.", "g": "34.", "h": "55.",
"i": "89.", "j": "144.", "k": "233.", "l": "377.", "m": "610.", "n": "987.", "o": "1597.", "p": "2584.",
"q": "4181.", "r": "6765.", "s": "10946.", "t": "17711.", "u": "28657.", "v": "46368.", "w": "75025.",
"x": "121393.", "y": "196418.", "z": "317811.", "!": "!", ",": ",", "?": "?", "'": "'.", "|": "|", ":": ":",
"": "", "-": "-."}
dekeys = {v: k for k, v in keys.items()}
cdekeys = {k.replace('.', ''): v for k, v in dekeys.items()}
reversedkeys = {" ": "|", "a": "317811.", "b": "196418.", "c": "121393.", "d": "75025.", "e": "46368.", "f": "28657.", "g": "17711.", "h": "10946.",
"i": "6765.", "j": "4181.", "k": "2584.", "l": "1597.", "m": "987.", "n": "610.", "o": "377.", "p": "233.",
"q": "144.", "r": "89.", "s": "55.", "t": "34.", "u": "21.", "v": "13.", "w": "8.",
"x": "5.", "y": "3.", "z": "2.", "!": "!", ",": ",", "?": "?", "'": "'.", "|": "|", ":": ":",
"": "", "-": "-."}
dereversedkeys = {v: k for k, v in reversedkeys.items()}
cdereversedkeys = {k.replace('.', ''): v for k, v in dereversedkeys.items()}
availableKeysBase = ["2.", "3.", "5.", "8.", "13.", "21.", "34.", "55.", "89.", "144.", "233.", "377.", "610.", "987.", "1597.", "2584.",
"4181.", "6765.", "10946.", "17711.", "28657.", "46368.", "75025.", "121393.", "196418.", "317811."]
availableKeys = ["2.", "3.", "5.", "8.", "13.", "21.", "34.", "55.", "89.", "144.", "233.", "377.", "610.", "987.", "1597.", "2584.",
"4181.", "6765.", "10946.", "17711.", "28657.", "46368.", "75025.", "121393.", "196418.", "317811."]
letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
keyList = []
symbolsDict = {" ": "|", "!": "!", ",": ",", "?": "?", "'": "'.", "|": "|", ":": ":", "": "", "-": "-."}
randomDict = {}
def encode(message):
    """Encode *message* with the regular Fibonacci keyset.

    Letters map to Fibonacci-number codes ending in "."; spaces become "|"
    and literal periods become "|||". Characters outside the key table
    raise KeyError (the GUI catches this and reports invalid input).

    :param message: text to encode (case-insensitive).
    :return: the encoded string; "" for an empty message (the previous
        version raised IndexError indexing totranslate[-1] on empty input).
    """
    totranslate = message.lower()
    # Protect literal periods first: letter codes themselves end with ".".
    totranslate = totranslate.replace(".", "|||")
    # The loop iterates the snapshot taken here, so each original character
    # is substituted exactly once (repeat characters become no-ops).
    for item in totranslate:
        totranslate = totranslate.replace(item, str(keys[item]))
    # Drop the code-terminating "." before a word separator...
    totranslate = totranslate.replace(".|", "|")
    # ...and the trailing one at the very end of the message.
    if totranslate and totranslate[-1] == ".":
        totranslate = totranslate[:-1]
    return totranslate
def decode(message):
    """Decode a regular-keyset message back to plain text.

    Inverse of encode(): "|||" marks a literal period, "|" a space, and
    "." separates the numeric letter codes.
    """
    totranslate = message.replace("|||", "~")  # "~" = placeholder for a literal "."
    totranslate = totranslate.replace("|", " ")
    translist = totranslate.split(sep=".")
    for item in translist:
        if " " in item:
            # Token spans a word boundary: decode each piece separately,
            # then stitch the word back together in place.
            index1 = translist.index(item)
            temp = item.split(" ")
            for item in temp:
                if "~" in item:
                    temp2 = item.replace("~", "")
                    tempIndex = temp.index(item)
                    temp[tempIndex] = temp2
                    temp2 = str(cdekeys[temp2])  # excuse my messy code here
                    temp[tempIndex] = temp2
                    temp2 = temp2 + ". "
                    temp[tempIndex] = temp2
                else:
                    index2 = temp.index(item)
                    item = str(cdekeys[item])
                    temp[index2] = item
            item = " ".join(temp)
            translist[index1] = item
        elif "~" in item:
            # Token carried the period placeholder: decode, restore ". ".
            temp = item.replace("~", "")
            temp = str(cdekeys[temp])
            index = translist.index(item)
            item = temp + ". "
            translist[index] = item
        else:
            # Plain letter code.
            # NOTE(review): list.index() returns the FIRST match; this works
            # because already-decoded entries no longer equal the raw code,
            # but identity-mapped tokens (e.g. "!") could mis-target --
            # confirm repeated-punctuation messages round-trip correctly.
            index = translist.index(item)
            translist[index] = item.replace(item, str(cdekeys[item]))
    return "".join(translist)
def encodeReversed(message):
    """Encode *message* with the reversed keyset (a<->z, b<->y, ...).

    Same algorithm as encode() but using reversedkeys.

    :param message: text to encode (case-insensitive).
    :return: the encoded string; "" for an empty message (the previous
        version raised IndexError on empty input).
    """
    totranslate = message.lower()
    # Protect literal periods first: letter codes themselves end with ".".
    totranslate = totranslate.replace(".", "|||")
    # Iterates the snapshot taken here, so each original character is
    # substituted exactly once.
    for item in totranslate:
        totranslate = totranslate.replace(item, str(reversedkeys[item]))
    # Drop the code-terminating "." before a separator and at the end.
    totranslate = totranslate.replace(".|", "|")
    if totranslate and totranslate[-1] == ".":
        totranslate = totranslate[:-1]
    return totranslate
def decodeReversed(message):
    """Decode a reversed-keyset message back to plain text.

    Same algorithm as decode() but using cdereversedkeys.
    """
    totranslate = message.replace("|||", "~")  # "~" = placeholder for a literal "."
    totranslate = totranslate.replace("|", " ")
    translist = totranslate.split(sep=".")
    for item in translist:
        if " " in item:
            # Token spans a word boundary: decode each piece separately.
            index1 = translist.index(item)
            temp = item.split(" ")
            for item in temp:
                if "~" in item:
                    temp2 = item.replace("~", "")
                    tempIndex = temp.index(item)
                    temp[tempIndex] = temp2
                    temp2 = str(cdereversedkeys[temp2])  # excuse my messy code here
                    temp[tempIndex] = temp2
                    temp2 = temp2 + ". "
                    temp[tempIndex] = temp2
                else:
                    index2 = temp.index(item)
                    item = str(cdereversedkeys[item])
                    temp[index2] = item
            item = " ".join(temp)
            translist[index1] = item
        elif "~" in item:
            # Token carried the period placeholder: decode, restore ". ".
            temp = item.replace("~", "")
            temp = str(cdereversedkeys[temp])
            index = translist.index(item)
            item = temp + ". "
            translist[index] = item
        else:
            # Plain letter code (see decode() for the list.index caveat).
            index = translist.index(item)
            translist[index] = item.replace(item, str(cdereversedkeys[item]))
    return "".join(translist)
def randomGen():
    """Generate a random letter->code dictionary and its shareable key.

    Draws each letter's code without replacement from the canonical pool,
    so the mapping is a random bijection over the 26 letter codes. The key
    is a comma-separated list of indices into availableKeysBase, in letter
    order, and can be fed back to generatefromkey() to rebuild the dict.

    Fix: the original consumed the module-global `availableKeys` list and
    then reset it from a second hard-coded copy of the pool -- fragile and
    not reentrant. A local copy of availableKeysBase is used instead; no
    global state is touched.

    :return: tuple of (randomDict including symbolsDict entries, key string).
    """
    pool = list(availableKeysBase)  # local working copy; globals untouched
    keyList = []
    randomDict = {}
    for letter in letters:
        choice = random.randint(0, len(pool) - 1)
        code = pool.pop(choice)  # without replacement -> bijection
        randomDict[letter] = code
        # The key records each code's position in the canonical ordering.
        keyList.append(str(availableKeysBase.index(code)))
    randomDict.update(symbolsDict)
    finalKey = ",".join(keyList)
    return randomDict, finalKey
def encodeRandom(message, randomDict):
    """Encode *message* using the supplied random letter->code dictionary.

    Same algorithm as encode() but with a caller-provided mapping (as
    produced by randomGen()/generatefromkey()).

    :param message: text to encode (case-insensitive).
    :param randomDict: mapping from character to code string.
    :return: the encoded string; "" for an empty message (the previous
        version raised IndexError indexing totranslate[-1] on empty input).
    """
    totranslate = message.lower()
    # Protect literal periods first: letter codes themselves end with ".".
    totranslate = totranslate.replace(".", "|||")
    # Iterates the snapshot taken here, so each original character is
    # substituted exactly once.
    for item in totranslate:
        totranslate = totranslate.replace(item, str(randomDict[item]))
    # Drop the code-terminating "." before a separator and at the end.
    totranslate = totranslate.replace(".|", "|")
    if totranslate and totranslate[-1] == ".":
        totranslate = totranslate[:-1]
    return totranslate
def generatefromkey(key4):
    """Rebuild a random letter->code dictionary from its key string.

    *key4* is a comma-separated list of indices into availableKeysBase,
    one per letter in alphabetical order (as produced by randomGen()).
    """
    indices = key4.split(",")
    dic = {letter: availableKeysBase[int(idx)]
           for letter, idx in zip(letters, indices)}
    dic.update(symbolsDict)
    return dic
def decodeRandom(message, dict):
    """Decode a random-keyset message using the supplied dictionary.

    Same algorithm as decode(), but the reverse lookups are derived from
    the caller-provided mapping.
    NOTE(review): the parameter name `dict` shadows the builtin; renaming
    would be nicer but could break keyword callers.
    """
    # Build code->letter lookups, with and without the trailing ".".
    randomdekeys = {v: k for k, v in dict.items()}
    randomcdekeys = {k.replace('.', ''): v for k, v in randomdekeys.items()}
    totranslate = message.replace("|||", "~")  # "~" = placeholder for a literal "."
    totranslate = totranslate.replace("|", " ")
    translist = totranslate.split(sep=".")
    for item in translist:
        if " " in item:
            # Token spans a word boundary: decode each piece separately.
            index1 = translist.index(item)
            temp = item.split(" ")
            for item in temp:
                if "~" in item:
                    temp2 = item.replace("~", "")
                    tempIndex = temp.index(item)
                    temp[tempIndex] = temp2
                    temp2 = str(randomcdekeys[temp2])  # excuse my messy code here
                    temp[tempIndex] = temp2
                    temp2 = temp2 + ". "
                    temp[tempIndex] = temp2
                else:
                    index2 = temp.index(item)
                    item = str(randomcdekeys[item])
                    temp[index2] = item
            item = " ".join(temp)
            translist[index1] = item
        elif "~" in item:
            # Token carried the period placeholder: decode, restore ". ".
            temp = item.replace("~", "")
            temp = str(randomcdekeys[temp])
            index = translist.index(item)
            item = temp + ". "
            translist[index] = item
        else:
            # Plain letter code (see decode() for the list.index caveat).
            index = translist.index(item)
            translist[index] = item.replace(item, str(randomcdekeys[item]))
    return "".join(translist)
if __name__ == '__main__':  # executed only if you use the file directly and not by importing it
    # Simple interactive loop driving the encoder/decoder from stdin.
    complete = False
    decodeNames = ["de", "decode", "De", "Decode", "d", "D"]
    encodeNames = ["en", "encode", "En", "Encode", "e", "E"]
    while complete == False:
        task = str(input("Decode or encode?"))
        if task in encodeNames:
            print("Random of not? Y or N")
            i = input()
            if i in ["Yes", 'y', 'yes', 'Y']:
                print("Give me the message")
                message = input()
                print("Give me the key [just press enter if you want to generate it randomly]")
                key = input()
                if key == '':
                    # No key supplied: generate one and show it to the user.
                    randomDict, key = randomGen()
                    print("your key is {}".format(key))
                    print("And your message is:")
                    print(encodeRandom(message, randomDict))
                else:
                    print(encodeRandom(message, generatefromkey(key)))
            elif i in ["No", "N", "n", "no"]:
                print("Reverse Y or N?")
                k = input()
                if k in ["Yes", "yes", "y", "Y"]:
                    print("Give me the message")
                    message = input()
                    print(encodeReversed(message))
                else:
                    # Any other answer falls back to the regular keyset.
                    print("Normal Mode")
                    print("Give me your message")
                    message = str(input(""))
                    print(encode(message))
            else:
                print("Wrong input")
        elif task in decodeNames:
            print("Random? Y or N")
            i = input()
            if i in ["Yes", 'y', 'yes', 'Y']:
                print("Give me the message")
                message = input()
                print("Give me the key ")
                key_input = input()
                print(decodeRandom(message, generatefromkey(key_input)))
            elif i in ["No", "N", "n", "no"]:
                print("Reverse Y or N?")
                k = input()
                if k in ["Yes", "yes", "y", "Y"]:
                    print("Give me the message")
                    message = input()
                    print(decodeReversed(message))
                else:
                    # Any other answer falls back to the regular keyset.
                    print("Normal Mode")
                    print("Give me your message")
                    message = str(input(""))
                    print(decode(message))
            else:
                print("Wrong input")
        else:
            print("You did not choose a valid action.")
        # Only an explicit "no" ends the loop; anything else repeats.
        print("Another task? Y or N")
        again = input()
        if again in ["No", "N", "n", "no"]:
            complete = True
homersoft/oadr2-ven-python | test/util.py | <gh_stars>1-10
from lxml import etree
def read_xml(path):
    """Read the file at *path* and return its parsed XML root element."""
    parser = etree.XMLParser(encoding="utf-8")
    with open(path) as handle:
        raw = handle.read().encode("utf-8")
    return etree.fromstring(raw, parser=parser)
homersoft/oadr2-ven-python | test/signal_level_unittest.py | <filename>test/signal_level_unittest.py<gh_stars>1-10
import unittest
from datetime import datetime, timedelta
from os import remove
from test.adr_event_generator import AdrEvent, AdrEventStatus
from freezegun import freeze_time
from oadr2.poll import OpenADR2
DB_FILENAME = "test.db"
class SignalLevelTest(unittest.TestCase):
    """Tests for the signal level computed by the OpenADR2 event controller.

    Each test builds a list of AdrEvent fixtures, converts them to their
    XML objects, then checks the level reported by
    ``event_controller._calculate_current_event_status`` at frozen points
    in time. The freeze/compute/assert boilerplate previously duplicated
    in every test is factored into ``_assert_signal_level``; the frozen
    instants and expected levels are unchanged.
    """

    def setUp(self):
        """Create an OpenADR2 client backed by a throwaway database."""
        self.start_thread = False
        # Configuration variables for the event handler.
        config = {'vtn_ids': 'vtn_1,vtn_2,vtn_3,TH_VTN',
                  'ven_id': 'ven_py', 'db_path': DB_FILENAME}
        self.adr_client = OpenADR2(
            event_config=config,
            vtn_base_uri="",
            start_thread=self.start_thread
        )
        # Make the console output a little easier to scan.
        print('')
        print((40 * '='))

    def tearDown(self):
        """Remove the on-disk database created by setUp()."""
        remove(DB_FILENAME)

    def _assert_signal_level(self, xml_events, at, expected):
        """Freeze the clock at *at* and assert the computed signal level."""
        with freeze_time(at):
            signal_level, *_ = self.adr_client.event_controller._calculate_current_event_status(xml_events)
            assert signal_level == expected

    def test_active_event_with_single_interval(self):
        print('in test_active_event_with_single_interval()')
        events = [
            AdrEvent(
                id="EventID",
                start=datetime(year=2020, month=3, day=18, hour=8),
                status=AdrEventStatus.ACTIVE,
                signals=[dict(index=0, duration=timedelta(hours=5), level=1.0)],
            ),
        ]
        xml_events = [e.to_obj() for e in events]
        minute = timedelta(minutes=1)
        self._assert_signal_level(xml_events, events[0].start - minute, 0)
        self._assert_signal_level(xml_events, events[0].start + minute, events[0].signals[0]["level"])
        self._assert_signal_level(xml_events, events[0].end + minute, 0)

    def test_active_event_with_multiple_intervals(self):
        print('in test_active_event_with_multiple_intervals()')
        events = [
            AdrEvent(
                id="EventID",
                start=datetime(year=2020, month=3, day=18, hour=10),
                status=AdrEventStatus.ACTIVE,
                signals=[
                    dict(index=0, duration=timedelta(hours=4), level=3.0),
                    dict(index=1, duration=timedelta(hours=4), level=2.0),
                ],
            ),
        ]
        xml_events = [e.to_obj() for e in events]
        minute = timedelta(minutes=1)
        self._assert_signal_level(xml_events, events[0].start - minute, 0)
        self._assert_signal_level(xml_events, events[0].start + minute, events[0].signals[0]["level"])
        # Second interval starts four hours in.
        self._assert_signal_level(xml_events, events[0].start + timedelta(hours=4, minutes=1), events[0].signals[1]["level"])
        self._assert_signal_level(xml_events, events[0].end + minute, 0)

    def test_pending_event(self):
        print('in test_pending_event()')
        events = [
            AdrEvent(
                id="EventID",
                start=datetime(year=2020, month=3, day=18, hour=20),
                status=AdrEventStatus.PENDING,
                signals=[
                    dict(index=0, duration=timedelta(hours=2), level=2.0),
                ],
            ),
        ]
        xml_events = [e.to_obj() for e in events]
        minute = timedelta(minutes=1)
        self._assert_signal_level(xml_events, events[0].start - minute, 0)
        self._assert_signal_level(xml_events, events[0].start + minute, events[0].signals[0]["level"])
        self._assert_signal_level(xml_events, events[0].end + minute, 0)

    def test_cancelled_event(self):
        print('in test_cancelled_event()')
        events = [
            AdrEvent(
                id="EventID",
                start=datetime(year=2020, month=3, day=18, hour=8),
                end=datetime(year=2020, month=3, day=18, hour=8, second=1),
                status=AdrEventStatus.CANCELLED,
                signals=[
                    dict(index=0, duration=timedelta(hours=10), level=3.0),
                ],
            ),
        ]
        xml_events = [e.to_obj() for e in events]
        minute = timedelta(minutes=1)
        # A cancelled event never contributes a signal level.
        self._assert_signal_level(xml_events, events[0].start - minute, 0)
        self._assert_signal_level(xml_events, events[0].start + minute, 0)
        self._assert_signal_level(xml_events, events[0].end + minute, 0)

    def test_multiple_events(self):
        print('in test_multiple_events()')
        events = [
            AdrEvent(
                id="EventID1",
                start=datetime(year=2020, month=3, day=18, hour=8),
                status=AdrEventStatus.ACTIVE,
                signals=[
                    dict(index=0, duration=timedelta(hours=2), level=1.0),
                ],
            ),
            AdrEvent(
                id="EventID2",
                start=datetime(year=2020, month=3, day=18, hour=8),
                end=datetime(year=2020, month=3, day=18, hour=8, second=1),
                status=AdrEventStatus.CANCELLED,
                signals=[
                    dict(index=0, duration=timedelta(hours=10), level=3.0),
                ],
            ),
            AdrEvent(
                id="EventID3",
                start=datetime(year=2020, month=3, day=18, hour=10),
                status=AdrEventStatus.ACTIVE,
                signals=[
                    dict(index=0, duration=timedelta(hours=4), level=3.0),
                    dict(index=1, duration=timedelta(hours=4), level=2.0),
                ]
            ),
            AdrEvent(
                id="EventID4",
                start=datetime(year=2020, month=3, day=18, hour=20),
                status=AdrEventStatus.PENDING,
                signals=[
                    dict(index=0, duration=timedelta(hours=2), level=2.0),
                ],
            ),
        ]
        xml_events = [e.to_obj() for e in events]
        minute = timedelta(minutes=1)
        self._assert_signal_level(xml_events, events[0].start - minute, 0)
        self._assert_signal_level(xml_events, events[0].start + minute, events[0].signals[0]["level"])
        self._assert_signal_level(xml_events, events[0].start + events[2].raw_signals[0]["duration"] + minute, events[2].signals[0]["level"])
        self._assert_signal_level(xml_events, events[2].start + events[2].raw_signals[1]["duration"] + minute, events[2].signals[1]["level"])
        self._assert_signal_level(xml_events, events[2].end + minute, 0)
        self._assert_signal_level(xml_events, events[3].start + minute, events[3].signals[0]["level"])
        self._assert_signal_level(xml_events, events[3].end + minute, 0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
homersoft/oadr2-ven-python | oadr2/poll.py | # pylint: disable=W1202, I1101
import threading
import urllib.error
import urllib.parse
import urllib.request
from random import uniform
import requests
from lxml import etree
from oadr2 import base, logger
# HTTP parameters:
REQUEST_TIMEOUT = 5 # HTTP request timeout (seconds) passed to requests.post
DEFAULT_VTN_POLL_INTERVAL = 300 # poll the VTN every X seconds
MINIMUM_POLL_INTERVAL = 10 # floor (seconds); smaller configured intervals are rejected
POLLING_JITTER = 0.1 # polling interval +/- this fraction, so VENs don't poll in lock-step
OADR2_URI_PATH = 'OpenADR2/Simple/' # URI of where the VEN needs to request from
class OpenADR2(base.BaseHandler):
    '''
    poll.OpenADR2 is the class for sending requests and responses for OpenADR
    2.0 events over HTTP ("pull" mode): a daemon thread polls the VTN's
    EiEvent endpoint on a jittered interval and hands payloads to the
    event handler.

    Member Variables:
    --------
    (Everything from base.BaseHandler)
    vtn_base_uri      -- base URI of the VTN with OADR2_URI_PATH appended
    vtn_poll_interval -- seconds between polls (jittered by POLLING_JITTER)
    ven_certs         -- (pem, key) tuple for TLS client auth, or None
    vtn_ca_certs      -- CA bundle used to verify the VTN (or False)
    poll_thread       -- the polling daemon thread (None until start())
    '''

    def __init__(self, event_config, vtn_base_uri,
                 control_opts=None,
                 username=None,
                 password=None,
                 ven_client_cert_key=None,
                 ven_client_cert_pem=None,
                 vtn_ca_certs=False,
                 vtn_poll_interval=DEFAULT_VTN_POLL_INTERVAL,
                 start_thread=True,
                 client_id=None,
                 ):
        '''
        Sets up the class and initializes the HTTP client.

        event_config -- A dictionary containing key-word arguments for the
                        EventHandler
        control_opts -- Optional kwargs for the event controller.  Defaults
                        to a fresh dict per instance (a mutable `{}` default
                        would be shared across instances).
        username/password -- optional HTTP basic-auth credentials
        ven_client_cert_key -- Certification key for the HTTP client
        ven_client_cert_pem -- PEM file/string for the HTTP client
        vtn_base_uri -- Base URI of the VTN's location
        vtn_poll_interval -- How often we should poll the VTN (seconds)
        vtn_ca_certs -- CA certs for the VTN
        start_thread -- start the thread for the poll loop or not?
                        left as a legacy option
        '''
        # Call the parent's methods
        super(OpenADR2, self).__init__(
            event_config,
            control_opts if control_opts is not None else {},
            client_id=client_id,
        )

        # Get the VTN's base uri set
        self.vtn_base_uri = vtn_base_uri
        if self.vtn_base_uri:  # append the standard OpenADR 2.0 path
            join_char = '/' if self.vtn_base_uri[-1] != '/' else ''
            self.vtn_base_uri = join_char.join((self.vtn_base_uri, OADR2_URI_PATH))

        # Validate the poll interval and fall back to the default on anything
        # unusable.  (Previously an uncaught `assert` -- stripped entirely
        # under `python -O` -- guarded the minimum, and the warning referenced
        # `self.vtn_poll_interval`, which was never assigned when int() raised.)
        try:
            self.vtn_poll_interval = int(vtn_poll_interval)
            if self.vtn_poll_interval < MINIMUM_POLL_INTERVAL:
                raise ValueError(vtn_poll_interval)
        except (TypeError, ValueError):
            logger.warning('Invalid poll interval: %s', vtn_poll_interval)
            self.vtn_poll_interval = DEFAULT_VTN_POLL_INTERVAL

        # Security & Authentication related
        self.ven_certs = (ven_client_cert_pem, ven_client_cert_key) \
            if ven_client_cert_pem and ven_client_cert_key else None
        self.vtn_ca_certs = vtn_ca_certs
        self.__username = username
        self.__password = password

        self.poll_thread = None
        if start_thread:  # this is left for backward compatibility
            self.start()
        logger.info("+++++++++++++++ OADR2 module started ++++++++++++++")

    def start(self):
        '''
        Start the polling daemon thread.  No-op when a poll thread is
        already running.
        '''
        if self.poll_thread and self.poll_thread.is_alive():
            logger.warning("Thread is already running")
            return
        self.poll_thread = threading.Thread(
            name='oadr2.poll',
            target=self.poll_vtn_loop)
        self.poll_thread.daemon = True
        self._exit.clear()
        self.poll_thread.start()
        logger.info("Polling thread started")

    def stop(self):
        '''
        Stops polling without stopping event controller
        :return:
        '''
        # Signal the loop to exit *before* joining; the previous order
        # (join first, then set) made join() always wait out its full
        # 2-second timeout because the loop had no reason to stop yet.
        self._exit.set()
        if self.poll_thread is not None:
            self.poll_thread.join(2)  # they are daemons.
        logger.info("Polling thread stopped")

    def exit(self):
        '''
        Shutdown the HTTP client, join the running threads and exit.
        '''
        self._exit.set()  # wake the poll loop so the join below can succeed
        if self.poll_thread is not None:
            self.poll_thread.join(2)  # they are daemons.
        super(OpenADR2, self).exit()

    def poll_vtn_loop(self):
        '''
        The threading loop which polls the VTN on an interval (randomized
        by +/- POLLING_JITTER) until `self._exit` is set.
        '''
        while not self._exit.is_set():
            try:
                self.query_vtn()
            except urllib.error.HTTPError as ex:  # 4xx or 5xx HTTP response:
                logger.warning("HTTP error: %s\n%s", ex, ex.read())
            except urllib.error.URLError as ex:  # network error.
                logger.debug("Network error: %s", ex)
            except Exception as ex:
                logger.exception("Error in OADR2 poll thread: %s", ex)
            self._exit.wait(
                uniform(
                    self.vtn_poll_interval * (1 - POLLING_JITTER),
                    self.vtn_poll_interval * (1 + POLLING_JITTER)
                )
            )
        logger.info("+++++++++++++++ OADR2 polling thread has exited.")

    def query_vtn(self):
        '''
        Query the VTN for an event, hand the payload to the event handler
        and send back the generated reply (if any).
        '''
        if not self.vtn_base_uri:
            logger.warning("VTN base URI is invalid: %s", self.vtn_base_uri)
            return
        event_uri = self.vtn_base_uri + 'EiEvent'
        payload = self.event_handler.build_request_payload()
        logger.debug(
            f'New polling request to {event_uri}:\n'
            f'{etree.tostring(payload, pretty_print=True).decode("utf-8")}'
        )
        try:
            resp = requests.post(
                event_uri,
                cert=self.ven_certs,
                verify=self.vtn_ca_certs,
                data=etree.tostring(payload),
                # Bound the request like send_reply() does -- without a
                # timeout a hung VTN blocked the poll thread forever.
                timeout=REQUEST_TIMEOUT,
                auth=(self.__username, self.__password) if self.__username or self.__password else None
            )
        except Exception as ex:
            logger.warning(f"Connection failed: {ex}")
            return
        reply = None
        try:
            payload = etree.fromstring(resp.content)
            logger.debug(
                f'Got Payload:\n'
                f'{etree.tostring(payload, pretty_print=True).decode("utf-8")}'
            )
            reply = self.event_handler.handle_payload(payload)
        except Exception as ex:
            logger.warning(
                f"Connection failed: error parsing payload\n"
                f"{ex}: {resp.content}"
            )
        # If we have a generated reply:
        if reply is not None:
            logger.debug(
                f'Reply to {event_uri}:\n'
                f'{etree.tostring(reply, pretty_print=True).decode("utf-8")}'
            )
            # tell the control loop that events may have updated
            # (note `self.event_controller` is defined in base.BaseHandler)
            self.event_controller.events_updated()
            self.send_reply(reply, event_uri)  # And send the response

    def send_reply(self, payload, uri):
        '''
        Send a reply back to the VTN.

        payload -- An lxml.etree.ElementTree object containing an OpenADR 2.0
                   payload
        uri -- The URI (of the VTN) where the response should be sent
        '''
        resp = requests.post(
            uri,
            cert=self.ven_certs,
            verify=self.vtn_ca_certs,
            data=etree.tostring(payload),
            timeout=REQUEST_TIMEOUT,
            auth=(self.__username, self.__password) if self.__username or self.__password else None
        )
        logger.debug("EiEvent response: %s", resp.status_code)
|
homersoft/oadr2-ven-python | setup.py | #!/usr/bin/env python
from setuptools import find_packages, setup

# Packaging metadata for the oadr2-ven distribution (OpenADR 2.0a VEN).
PACKAGE_INFO = dict(
    name='oadr2-ven',
    version='1.0.2',
    description='OpenADR 2.0a VEN for Python',
    author='EnerNOC Advanced Technology',
    author_email='<EMAIL>',
    url='http://open.enernoc.com',
    # Everything under the project root except test packages.
    packages=find_packages('.', exclude=['*.tests']),
    install_requires=['lxml', 'sleekxmpp', 'dnspython', 'python-dateutil', 'requests'],
    tests_require=['freezegun'],
    zip_safe=False,
)

setup(**PACKAGE_INFO)
|
homersoft/oadr2-ven-python | oadr2/schemas.py | <reponame>homersoft/oadr2-ven-python
from datetime import datetime
from typing import List, Optional, Union
from lxml import etree
from pydantic import BaseModel
from oadr2 import schedule
# Stuff for the 2.0a spec of OpenADR
OADR_XMLNS_A = 'http://openadr.org/oadr-2.0a/2012/07'
PYLD_XMLNS_A = 'http://docs.oasis-open.org/ns/energyinterop/201110/payloads'
EI_XMLNS_A = 'http://docs.oasis-open.org/ns/energyinterop/201110'
EMIX_XMLNS_A = 'http://docs.oasis-open.org/ns/emix/2011/06'
XCAL_XMLNS_A = 'urn:ietf:params:xml:ns:icalendar-2.0'
STRM_XMLNS_A = 'urn:ietf:params:xml:ns:icalendar-2.0:stream'
# Prefix -> namespace-URI map passed as the `namespaces=` argument of
# lxml find/findtext/iterfind calls throughout this module.
NS_A = {
    'oadr': OADR_XMLNS_A,
    'pyld': PYLD_XMLNS_A,
    'ei': EI_XMLNS_A,
    'emix': EMIX_XMLNS_A,
    'xcal': XCAL_XMLNS_A,
    'strm': STRM_XMLNS_A
}
# Stuff for the 2.0b spec of OpenADR
OADR_XMLNS_B = 'http://openadr.org/oadr-2.0b/2012/07'
DSIG11_XMLNS_B = 'http://www.w3.org/2009/xmldsig11#'
DS_XMLNS_B = 'http://www.w3.org/2000/09/xmldsig#'
CLM5ISO42173A_XMLNS_B = 'urn:un:unece:uncefact:codelist:standard:5:ISO42173A:2010-04-07'
SCALE_XMLNS_B = 'http://docs.oasis-open.org/ns/emix/2011/06/siscale'
POWER_XMLNS_B = 'http://docs.oasis-open.org/ns/emix/2011/06/power'
GB_XMLNS_B = 'http://naesb.org/espi'
ATOM_XMLNS_B = 'http://www.w3.org/2005/Atom'
CCTS_XMLNS_B = 'urn:un:unece:uncefact:documentation:standard:CoreComponentsTechnicalSpecification:2'
GML_XMLNS_B = 'http://www.opengis.net/gml/3.2'
GMLSF_XMLNS_B = 'http://www.opengis.net/gmlsf/2.0'
XSI_XMLNS_B = 'http://www.w3.org/2001/XMLSchema-instance'
NS_B = {  # If you see an 2.0a variable used here, that means that the namespace is the same
    'oadr': OADR_XMLNS_B,
    'pyld': PYLD_XMLNS_A,
    'ei': EI_XMLNS_A,
    'emix': EMIX_XMLNS_A,
    'xcal': XCAL_XMLNS_A,
    'strm': STRM_XMLNS_A,
    'dsig11': DSIG11_XMLNS_B,
    'ds': DS_XMLNS_B,
    'clm': CLM5ISO42173A_XMLNS_B,
    'scale': SCALE_XMLNS_B,
    'power': POWER_XMLNS_B,
    'gb': GB_XMLNS_B,
    'atom': ATOM_XMLNS_B,
    'ccts': CCTS_XMLNS_B,
    'gml': GML_XMLNS_B,
    'gmlsf': GMLSF_XMLNS_B,
    'xsi': XSI_XMLNS_B
}
# Other important constants that we need
# Signal types accepted for the "simple" signal (A-profile conformance).
VALID_SIGNAL_TYPES = ('level', 'price', 'delta', 'setpoint')
OADR_PROFILE_20A = '2.0a'
OADR_PROFILE_20B = '2.0b'
class SignalSchema(BaseModel):
    """One interval of an event's "simple" signal."""
    index: int      # interval position within the signal (from xcal:uid/xcal:text)
    duration: str   # ISO-8601 duration string, e.g. "PT1H" (parsed via schedule.duration_to_delta)
    level: float    # signal payload value in effect for this interval
    class Config:
        orm_mode = True  # allow construction from attribute-bearing objects (e.g. DB rows)
class EventSchema(BaseModel):
    '''
    Normalized representation of one OpenADR EiEvent.

    Usually built from an oadrDistributeEvent XML fragment via `from_xml`.
    `start`/`end` describe the *effective* active period (start may be
    randomized by the xcal `startafter` tolerance); `original_start` keeps
    the dtstart exactly as sent by the VTN.
    '''
    id: Union[str, None]
    start: datetime                        # effective start (randomized by startafter)
    original_start: datetime               # dtstart exactly as sent by the VTN
    end: Union[datetime, None]             # None => unending event (overall duration 0)
    cancellation_offset: Union[str, None]  # startafter value; reused to randomize cancellation
    signals: List[SignalSchema]
    group_ids: Optional[List[str]]
    resource_ids: Optional[List[str]]
    party_ids: Optional[List[str]]
    ven_ids: Optional[List[str]]
    market_context: Optional[str]
    mod_number: int
    status: str
    test_event: bool
    priority: int

    class Config:
        orm_mode = True  # allow construction from attribute-bearing objects

    def get_current_interval(self, now: Optional[datetime] = None) -> Union[SignalSchema, None]:
        '''
        Return the signal interval active at `now`, or None when the event
        has not started, has already ended, or no interval covers `now`.

        `now` defaults to the *call-time* UTC now.  (The previous default
        `now=datetime.utcnow()` was evaluated once, when the class was
        defined, and stayed frozen for the life of the process.)
        '''
        if now is None:
            now = datetime.utcnow()
        if self.start > now:  # event not started yet
            return None
        if self.end and now > self.end:  # event already ended
            return None
        previous_signal_end = self.start
        for signal in self.signals:
            # Compute the interval's length once (was computed twice before).
            delta = schedule.duration_to_delta(signal.duration)[0]
            if not bool(delta):
                # Zero-length duration: the signal runs until the event ends.
                return signal
            current_signal_end = previous_signal_end + delta
            if previous_signal_end < now <= current_signal_end:
                return signal
            previous_signal_end = current_signal_end

    def cancel(self, random_end=False):
        '''
        Mark this event cancelled and compute its effective end time.

        Active events (or `random_end=True`) end after a random offset of up
        to `cancellation_offset`, mirroring the randomized start; events that
        are already cancelled keep their end; anything else ends immediately.
        '''
        if self.status == "active" or random_end:
            self.end = schedule.random_offset(datetime.utcnow(), 0, self.cancellation_offset) if self.cancellation_offset else datetime.utcnow()
        elif self.status == "cancelled":
            pass  # keep the previously computed end
        else:
            self.end = datetime.utcnow()
        self.status = "cancelled"

    @staticmethod
    def from_xml(evt_xml: etree.XML):
        '''Build an EventSchema from an eiEvent XML element (A profile).'''
        event_id = EventSchema.get_event_id(evt_xml)
        event_original_start = EventSchema.get_active_period_start(evt_xml)
        signal_list = EventSchema.get_signals(evt_xml)
        event_signals = (
            [
                SignalSchema(
                    duration=evt[0],
                    index=int(evt[1]),
                    level=float(evt[2])
                ) for evt in signal_list
            ]
            if signal_list else []
        )
        event_group_ids = EventSchema.get_group_ids(evt_xml)
        event_resource_ids = EventSchema.get_resource_ids(evt_xml)
        event_party_ids = EventSchema.get_party_ids(evt_xml)
        event_ven_ids = EventSchema.get_ven_ids(evt_xml)
        event_market_context = EventSchema.get_market_context(evt_xml)
        event_mod_number = EventSchema.get_mod_number(evt_xml)
        event_priority = EventSchema.get_priority(evt_xml)
        event_status = EventSchema.get_status(evt_xml)
        # Randomize the start inside the (startbefore, startafter) tolerance.
        start_offset = EventSchema.get_start_before_after(evt_xml)
        event_start = schedule.random_offset(event_original_start, *start_offset)
        event_duration = EventSchema.get_active_period_duration(evt_xml)[0]
        if bool(event_duration):
            if event_status == "cancelled":
                ending_time = event_start
            else:
                ending_time = event_duration + event_start
        else:
            # Overall duration of 0: event remains active until cancelled.
            ending_time = None
        event_test = EventSchema.get_test_event(evt_xml)
        return EventSchema(
            id=event_id,
            signals=event_signals,
            start=event_start,
            end=ending_time,
            cancellation_offset=start_offset[1],
            original_start=event_original_start,
            group_ids=event_group_ids,
            resource_ids=event_resource_ids,
            party_ids=event_party_ids,
            ven_ids=event_ven_ids,
            market_context=event_market_context,
            mod_number=event_mod_number,
            priority=event_priority,
            status=event_status,
            test_event=event_test
        )

    @staticmethod
    def get_event_id(evt, ns_map=NS_A):
        '''Return the ei:eventID text, or None when absent.'''
        return evt.findtext("ei:eventDescriptor/ei:eventID", namespaces=ns_map)

    @staticmethod
    def get_status(evt, ns_map=NS_A):
        '''Return the ei:eventStatus text, or None when absent.'''
        return evt.findtext("ei:eventDescriptor/ei:eventStatus", namespaces=ns_map)

    @staticmethod
    def get_test_event(evt, ns_map=NS_A):
        '''Any value other than missing/"false" marks the event as a test event.'''
        test_event = evt.findtext(
            "ei:eventDescriptor/ei:testEvent", namespaces=ns_map
        )
        if not test_event or test_event.lower() == "false":
            return False
        else:
            return True

    @staticmethod
    def get_mod_number(evt, ns_map=NS_A):
        '''Return the modification number as int (raises if the element is missing).'''
        return int(evt.findtext(
            "ei:eventDescriptor/ei:modificationNumber",
            namespaces=ns_map))

    @staticmethod
    def get_market_context(evt, ns_map=NS_A):
        '''Return the emix:marketContext text, or None when absent.'''
        return evt.findtext(
            "ei:eventDescriptor/ei:eiMarketContext/emix:marketContext",
            namespaces=ns_map
        )

    @staticmethod
    def get_current_signal_value(evt, ns_map=NS_A):
        '''Return the current simple-signal value text, or None when absent.'''
        return evt.findtext(
            'ei:eiEventSignals/ei:eiEventSignal/ei:currentValue/' + \
            'ei:payloadFloat/ei:value', namespaces=ns_map)

    @staticmethod
    def get_signals(evt, ns_map=NS_A):
        '''
        Return [(duration, uid, value), ...] for the intervals of the
        "simple" signal, or None when no valid simple signal is present.
        '''
        simple_signal = None
        signals = []
        for signal in evt.iterfind(
                'ei:eiEventSignals/ei:eiEventSignal', namespaces=ns_map
        ):
            signal_name = signal.findtext('ei:signalName', namespaces=ns_map)
            signal_type = signal.findtext('ei:signalType', namespaces=ns_map)
            if signal_name == 'simple' and signal_type in VALID_SIGNAL_TYPES:
                simple_signal = signal  # This is A profile only conformance rule!
        if simple_signal is None:
            return None
        for interval in simple_signal.iterfind('strm:intervals/ei:interval', namespaces=ns_map):
            duration = interval.findtext('xcal:duration/xcal:duration', namespaces=ns_map)
            uid = interval.findtext('xcal:uid/xcal:text', namespaces=ns_map)
            value = interval.findtext('ei:signalPayload//ei:value', namespaces=ns_map)
            signals.append((duration, uid, value))
        return signals

    @staticmethod
    def get_active_period_start(evt, ns_map=NS_A):
        '''Return the active period dtstart as a datetime.'''
        dttm_str = evt.findtext(
            'ei:eiActivePeriod/xcal:properties/xcal:dtstart/xcal:date-time',
            namespaces=ns_map)
        return schedule.str_to_datetime(dttm_str)

    @staticmethod
    def get_active_period_duration(evt, ns_map=NS_A):
        '''Return the overall event duration parsed via schedule.duration_to_delta.'''
        dttm_str = evt.findtext(
            'ei:eiActivePeriod/xcal:properties/xcal:duration/xcal:duration',
            namespaces=ns_map)
        return schedule.duration_to_delta(dttm_str)

    @staticmethod
    def get_start_before_after(evt, ns_map=NS_A):
        '''Return the (startbefore, startafter) tolerance texts (either may be None).'''
        return (
            evt.findtext(
                'ei:eiActivePeriod/xcal:properties/xcal:tolerance/xcal:tolerate/xcal:startbefore',
                namespaces=ns_map
            ),
            evt.findtext(
                'ei:eiActivePeriod/xcal:properties/xcal:tolerance/xcal:tolerate/xcal:startafter',
                namespaces=ns_map
            )
        )

    @staticmethod
    def get_group_ids(evt, ns_map=NS_A):
        '''Return the list of eiTarget groupID texts (possibly empty).'''
        return [e.text for e in evt.iterfind('ei:eiTarget/ei:groupID', namespaces=ns_map)]

    @staticmethod
    def get_resource_ids(evt, ns_map=NS_A):
        '''Return the list of eiTarget resourceID texts (possibly empty).'''
        return [e.text for e in evt.iterfind('ei:eiTarget/ei:resourceID', namespaces=ns_map)]

    @staticmethod
    def get_party_ids(evt, ns_map=NS_A):
        '''Return the list of eiTarget partyID texts (possibly empty).'''
        return [e.text for e in evt.iterfind('ei:eiTarget/ei:partyID', namespaces=ns_map)]

    @staticmethod
    def get_ven_ids(evt, ns_map=NS_A):
        '''Return the list of eiTarget venID texts (possibly empty).'''
        return [e.text for e in evt.iterfind('ei:eiTarget/ei:venID', namespaces=ns_map)]

    @staticmethod
    def get_priority(evt, ns_map=NS_A):
        '''Return the event priority as int; missing/empty defaults to 1.'''
        return int(evt.findtext("ei:eventDescriptor/ei:priority", namespaces=ns_map) or 1)
|
homersoft/oadr2-ven-python | test/test_conformance.py | <reponame>homersoft/oadr2-ven-python<gh_stars>1-10
from datetime import datetime, timedelta
from test.adr_event_generator import AdrEvent, AdrEventStatus, generate_payload
from unittest import mock
import pytest
from freezegun import freeze_time
from oadr2 import controller, event
from oadr2.poll import OpenADR2
from oadr2.schemas import NS_A
# sqlite DB path template; filled in with the per-test pytest tmpdir.
TEST_DB_ADDR = "%s/test2.db"
# XPath fragments (relative to the oadrCreatedEvent reply root) used in assertions.
responseCode = 'pyld:eiCreatedEvent/ei:eiResponse/ei:responseCode'
requestID = 'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/pyld:requestID'
optType = 'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/ei:optType'
venID = 'pyld:eiCreatedEvent/ei:venID'
eventResponse = "pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse"
def test_6_test_event(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    The presence of any string except “false” in the oadrDisributeEvent
    testEvent element is treated as a trigger for a test event.
    """
    # An already-started event flagged as a test event.
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow()-timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.ACTIVE, test_event=True
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_controller = controller.EventController(event_handler)
    event_handler.handle_payload(generate_payload([test_event]))
    # A test event must not drive the signal level...
    signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([test_event.to_obj()])
    assert (signal_level, evt_id, remove_events) == (0, None, [])
    # ...but it must still be stored as an active event.
    active_event = event_handler.get_active_events()[0]
    expected_event = test_event.to_obj()
    assert active_event == expected_event
@pytest.mark.parametrize(
    "response_required",
    [
        # The ids were previously swapped relative to the values: the test
        # asserts `bool(reply) == response_required`, so False means *no*
        # response is expected.
        pytest.param(
            False,
            id="response not required"
        ),
        pytest.param(
            True,
            id="response required"
        ),
    ]
)
def test_12_response_required(response_required, tmpdir):
    """
    VEN, EiEvent Service, oadrCreatedEvent Payload
    The VEN must respond to an event in oadrDistributeEvent based upon the
    value in each event’s oadrResponseRequired element as follows:
    Always – The VEN shall respond to the event with an oadrCreatedEvent
    eventResponse . This includes unchanged, new, changed, and cancelled
    events
    Never – The VEN shall not respond to the event with a oadrCreatedEvent
    eventResponse
    Note that oadrCreatedEvent event responses SHOULD be returned in one
    message, but CAN be returned in separate messages.
    """
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() - timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.ACTIVE, response_required=response_required
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    reply = event_handler.handle_payload(generate_payload([test_event]))
    # A reply is generated exactly when a response is required.
    assert bool(reply) == response_required
def test_18_overlaping_events(tmpdir):
    """
    VEN/VTN, EiEvent Service
    The VEN/VTN must honor the following rules with regards to overlapping
    active periods...
    DR events with overlapping active periods may be issued, but only if they
    are from different marketContexts and only if the programs have a priority
    associated with them. DR events for programs with higher priorities
    supersede the events of programs with lower priorities. If two programs with
    overlapping events have the same priority then the program whose event
    was activated first takes priority.
    The behavior of a VEN is undefined with respect to the receipt on an
    overlapping event in the same market context. The VTN shall not send
    overlapping events in the same market context, including events that could
    potentially overlap a randomized event cancellation. Nothing in this rule
    should preclude a VEN from opting into overlapping events in different
    market contexts.
    """
    # Two simultaneously active events in different market contexts; the
    # second has the higher priority (2 > 1) and a distinct level (2.0).
    expected_events = [
        AdrEvent(
            id="FooEvent1",
            start=datetime.utcnow() - timedelta(seconds=60),
            signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
            status=AdrEventStatus.ACTIVE, market_context="context1", priority=1
        ),
        AdrEvent(
            id="FooEvent2",
            start=datetime.utcnow() - timedelta(seconds=60),
            signals=[dict(index=0, duration=timedelta(minutes=10), level=2.0)],
            status=AdrEventStatus.ACTIVE, market_context="context2", priority=2
        ),
    ]
    event_handler = event.EventHandler(
        "VEN_ID",
        db_path=TEST_DB_ADDR % tmpdir,
        vtn_ids="TH_VTN",
        market_contexts="context1,context2"
    )
    event_controller = controller.EventController(event_handler)
    event_handler.handle_payload(generate_payload(expected_events))
    active_events = event_handler.get_active_events()
    # The higher-priority event's level must win.
    signal_level, evt_id, remove_events = event_controller._calculate_current_event_status(active_events)
    assert (signal_level, evt_id, remove_events) == (2.0, "FooEvent2", [])
def test_19_valid_invalid_events(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    If an oadrDistributeEvent payload has as mix of valid and invalid events,
    the implementation shall only respond to the relevant valid events and not
    reject the entire message.
    """
    # "FooFailed" targets a different VEN and so is the invalid one.
    expected_events = [
        AdrEvent(
            id="FooEvent",
            start=datetime.utcnow() + timedelta(seconds=60),
            signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
            status=AdrEventStatus.PENDING
        ),
        AdrEvent(
            id="FooFailed",
            start=datetime.utcnow() + timedelta(seconds=160),
            signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
            status=AdrEventStatus.PENDING, ven_ids=["Wrong_Ven"]
        ),
        AdrEvent(
            id="AnotherFooEvent",
            start=datetime.utcnow() + timedelta(seconds=260),
            signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
            status=AdrEventStatus.PENDING
        )
    ]
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler(
        "VEN_ID",
        db_path=TEST_DB_ADDR % tmpdir,
        vtn_ids="TH_VTN"
    )
    event_handler.db.update_event = db_mock  # keep the invalid event out of the DB layer
    reply = event_handler.handle_payload(generate_payload(expected_events))
    # The overall response succeeds; per-event responses carry the errors.
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    for event_reply in reply.iterfind(eventResponse, namespaces=NS_A):
        event_id = event_reply.findtext("ei:qualifiedEventID/ei:eventID", namespaces=NS_A)
        assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
        if event_id == "FooFailed":
            assert event_reply.findtext("ei:responseCode", namespaces=NS_A) == "403"
            assert event_reply.findtext("ei:optType", namespaces=NS_A) == "optOut"
        else:
            assert event_reply.findtext("ei:responseCode", namespaces=NS_A) == "200"
            assert event_reply.findtext("ei:optType", namespaces=NS_A) == "optIn"
def test_21a_ven_id_validation(tmpdir):
    """
    VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
    If venID, vtnID, or EventID is included in payloads, the receiving entity must
    validate the ID values are as expected and generate an error if no ID is
    present or an unexpected value is received.
    Exception: A VEN shall not generate an error upon receipt of a cancelled
    event whose eventID is not previously known.
    """
    # Event addressed to a different VEN -> this VEN must opt out.
    expected_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
        status=AdrEventStatus.PENDING, ven_ids=["Wrong_Ven"]
    )
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler(
        "VEN_ID",
        db_path=TEST_DB_ADDR % tmpdir,
        vtn_ids="TH_VTN"
    )
    event_handler.db.update_event = db_mock
    reply = event_handler.handle_payload(generate_payload([expected_event]))
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(optType, namespaces=NS_A) == "optOut"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
def test_21b_vtn_id_validation(tmpdir):
    """Counterpart of test_21a: an unexpected vtnID must yield a 400 response."""
    expected_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
        status=AdrEventStatus.PENDING
    )
    db_mock = mock.MagicMock()
    # The handler expects "Wrong_Vtn" but the payload is from "TH_VTN".
    event_handler = event.EventHandler(
        "VEN_ID",
        db_path=TEST_DB_ADDR % tmpdir,
        vtn_ids="Wrong_Vtn"
    )
    event_handler.db.update_event = db_mock
    reply = event_handler.handle_payload(generate_payload([expected_event]))
    assert reply.findtext(responseCode, namespaces=NS_A) == "400"
    # On a payload-level error the requestID lives in eiResponse, not eventResponse.
    assert reply.findtext('pyld:eiCreatedEvent/ei:eiResponse/pyld:requestID', namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
# Each case targets the VEN via a different eiTarget criterion; any single
# match must make the VEN accept the event.
@pytest.mark.parametrize(
    "expected_event",
    [
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, resource_ids=["resource_id"], ven_ids=[]
            ),
            id="resource_id"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, party_ids=["party_id"], ven_ids=[]
            ),
            id="party_id"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, group_ids=["group_id"], ven_ids=[]
            ),
            id="group_id"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING
            ),
            id="ven_id"
        ),
    ]
)
def test_22_target_validation(expected_event, tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    If no sub elements are present in oadrDistributeEvent eiTarget, the
    presumption is that the recipient is the intended target of the event. If
    multiple criteria are present in eiTarget subelements, the values are OR’d
    togther to determine whether the VEN is a target for the event. However,
    the VENs behavior with respect to responding to an event when it matches
    one of the eiTarget criteria is implementation dependent.
    """
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler(
        "VEN_ID",
        db_path=TEST_DB_ADDR % tmpdir,
        resource_id="resource_id",
        party_id="party_id",
        group_id="group_id"
    )
    event_handler.db.update_event = db_mock
    reply = event_handler.handle_payload(generate_payload([expected_event]))
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(optType, namespaces=NS_A) == "optIn"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
# Conformance rules that are untestable here or covered elsewhere are kept as
# skipped placeholders so the rule numbering stays visible in the suite.
@pytest.mark.skip(reason="No need to test")
def test_23_oadrRequestEvent():
    """
    VEN/VTN, EiEvent Service, oadrRequestEvent Payload
    oadrRequestEvent many only be sent in the VEN to VTN direction
    """
    assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_25_error_reporting():
    """
    VEN/VTN, EiEvent Service
    VTN and VEN: The following rules must be followed with respect to
    application level responses with respect to multiple events:
    1)If the Response indicates success, there is no need to examine each
    element in the Responses.
    2)If some elements fail and other succeed, the Response will indicate the
    error, and the recipient should evaluate each element in Responses to
    discover which components of the operation failed.
    Exception: For oadrCreatedEvent, the presence of a failure indication in
    eventResponse:responseCode shall not force a failure indication in
    eiResponse:responseCode. Typical behavior would be for the VEN to report
    a success indication in eiResponse:responseCode and indicate any event
    specific errors in eventResponse:responseCode. The
    """
    assert False
def test_30_start_time_randomization(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    The VEN must randomize the dtstart time of the event if a value is present
    in the startafter element. Event completion times are determined by adding
    the event duration to the randomized dtstart time. Modifications to an event
    should maintain the same random offset, unless the startafter element itself
    is modified.
    """
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(minutes=10),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING, start_after=timedelta(minutes=2)
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.handle_payload(generate_payload([test_event]))
    active_event = event_handler.get_active_events()[0]
    expected_event = test_event.to_obj()
    # NOTE(review): both asserts assume the random offset lands strictly
    # inside (0, 2min); a boundary draw would make this flaky — confirm
    # schedule.random_offset's range is exclusive.
    assert active_event.start != expected_event.start
    assert (active_event.start - expected_event.start) < timedelta(minutes=2)
# Skipped placeholders for rules 31/32 (active-period and interval
# subelements); the behavior is exercised indirectly by the other tests.
@pytest.mark.skip(reason="Covered in other tests")
def test_31_active_period_subelements():
    """
    # VEN, EiEvent Service, oadrDistributeEvent Payload
    # The VEN must recognize and act upon values specified in the subelements
    # of activePeriod including:
    # dtStart
    # duration
    # tolerence
    # x-eiRampUp (positive and negative)
    # x-eiRecovery (positive and negative)
    # Note: x-eiRampup and x-eiRecovery are not testable requirements
    """
    assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_32_intervals_subelements():
    """
    VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
    The VEN must recognize and act upon values specified in the subelements
    of intervals including:
    duration
    signalPayload
    """
    assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_31_event_error_indication():
    """
    VEN/VTN
    The implementation must provide an application layer error indication as a
    result of the following conditions:
    Schema does not validate
    Missing expected information
    Payload not of expected type
    ID not as expected
    Illogical request – Old date on new event, durations don’t add up
    correctly, etc.
    Etc.
    """
    assert False
def test_35_response_created_event(tmpdir):
    """
    VEN, EiEvent Service, oadrCreatedEvent Payload
    The eiResponses element in oadrCreatedEvent is mandatory, except when
    an error condition is reported in eiResponse.
    """
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(minutes=10),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    reply = event_handler.handle_payload(generate_payload([test_event]))
    # A successful reply must carry the eventResponses element.
    assert bool(reply.find("pyld:eiCreatedEvent/ei:eventResponses", namespaces=NS_A))
def test_36_cancellation_acknowledgement(tmpdir):
    """
    VEN, EiEvent Service, oadrCreatedEvent Payload
    An event cancellation received by the VEN must be acknowledged with an
    oadrCreatedEvent with the optType element set as follows, unless the
    oadrResponseRequired is set to ‘never”:
    optIn = Confirm to cancellation
    optOut = Cannot cancel
    Note: Once an event cancellation is acknowledged by the VEN, the event
    shall not be included in subsequent oadrCreatedEvent payloads unless the
    VTN includes this event in a subsequent oadrDistributeEvent payload.
    """
    # A cancellation for an event the VEN has never seen before.
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(minutes=10),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.CANCELLED
    )
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.db.update_event = db_mock
    reply = event_handler.handle_payload(generate_payload([test_event]))
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(optType, namespaces=NS_A) == "optIn"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
    # An unknown cancelled event must not be written to storage.
    db_mock.assert_not_called()
# Skipped placeholders for rules 37/41 (pull-model support and the VTN's
# obligation to send a requestID) — not verifiable from the VEN side here.
@pytest.mark.skip(reason="No need to test")
def test_37_push_pull_model():
    """
    VEN
    A VEN Implementation must support pull model and can optionally also
    support push
    """
    assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_41_request_id():
    """
    VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
    The VTN must send a requestID value as part of the oadrDistributeEvent payload.
    Note: The requestID value is not required to be unique, and in fact may be the
    same for all oadrDistributeEvent payloads. That there are two requestID fields
    in oadrDistributeEvent. The feild that must be populated with a requestID is
    located at oadrDistributeEvent:requestID
    """
    assert False
def test_42_request_id(tmpdir):
    """
    VEN, EiEvent Service, oadrCreatedEvent Payload
    A VEN receiving an oadrDistributeEvent eiEvent must use the received requestID
    value in the EiCreatedEvent eventResponse when responding to the event. This
    includes any and all subsequent EiCreatedEvent messages that may be sent to
    change the opt status of the VEN.
    The eiResponse:requestID in oadrCreatedEvent shall be left empty if the
    payload contains eventResponses. The VTN shall
    look inside each
    eventResponse for the relevant requestID
    """
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(minutes=10),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING, start_after=timedelta(minutes=2)
    )
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.db.update_event = db_mock
    reply = event_handler.handle_payload(generate_payload([test_event]))
    # The requestID generated by the payload builder must be echoed back
    # inside the per-event eventResponse.
    assert reply.findtext(
        'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/pyld:requestID',
        namespaces=NS_A
    ) == "OadrDisReq092520_152645_178"
@pytest.mark.skip(reason="No need to test")
def test_43_request_id_uniqueness():
"""
VEN, EiEvent Service, oadrDistributeEvent Payload
The VEN must make no assumptions regarding the uniqueness of requestID values
received from the VTN in the oadrDistributePayload
"""
assert False
@pytest.mark.skip(reason="No need to test")
def test_44_empty_request_id():
    """
    VEN/VTN
    With the exception of oadrDistributeEvent and oadrCreatedEvent payloads,
    requestID may be an empty element in other payloads and if a requestID value is
    present, it may be ignored
    """
    # Spec placeholder only; intentionally skipped (nothing to assert).
    assert False
@pytest.mark.skip(reason="No need to test")
def test_45_schema_location():
    """
    VEN/VTN
    Messages sent between VENs and VTNs shall *not* include a
    schemaLocation attribute
    """
    # Spec placeholder only; intentionally skipped (nothing to assert).
    assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_46_optional_elements():
    """
    VEN/VTN
    Optional elements do not need to be included in outbound payloads, but if
    they are, the VEN or VTN receiving the payload must understand and act
    upon those optional elements
    """
    # Spec placeholder only; behaviour is exercised by the concrete tests.
    assert False
def test_47_unending_event(tmpdir):
    """
    VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
    An event with an overall duration of 0 indicates an event with no defined
    end time and will remain active until explicitly cancelled.
    """
    # A zero-length signal duration encodes "no defined end time".
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=0), level=1.0)],
        status=AdrEventStatus.ACTIVE
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_controller = controller.EventController(event_handler)
    event_handler.handle_payload(generate_payload([test_event]))
    active_event = event_handler.get_active_events()[0]
    # Before dtStart there is no signal yet.
    signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
    assert (signal_level, evt_id, remove_events) == (0, None, [])
    # The event must stay active at +70 seconds, +70 minutes and +70 hours —
    # a 0-duration event never expires on its own.
    with freeze_time(datetime.utcnow() + timedelta(seconds=70)):
        signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
        assert (signal_level, evt_id, remove_events) == (1.0, "FooEvent", [])
    with freeze_time(datetime.utcnow() + timedelta(minutes=70)):
        signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
        assert (signal_level, evt_id, remove_events) == (1.0, "FooEvent", [])
    with freeze_time(datetime.utcnow() + timedelta(hours=70)):
        signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
        assert (signal_level, evt_id, remove_events) == (1.0, "FooEvent", [])
    # Only an explicit cancellation (higher mod number) ends the event.
    test_event.status = AdrEventStatus.CANCELLED
    test_event.mod_number += 1
    event_handler.handle_payload(generate_payload([test_event]))
    active_event = event_handler.get_active_events()[0]
    signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([active_event])
    assert (signal_level, evt_id, remove_events) == (0, None, ["FooEvent"])
@pytest.mark.parametrize(
    "expected_event",
    [
        # Event carrying a market context this VEN is not configured for.
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, market_context="http://bad.context"
            ),
            id="market_context"
        ),
        # Event whose signalName is not one the VEN supports.
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, signal_name="bad"
            ),
            id="signal_name"
        ),
    ]
)
def test_48_payload_error_indication(expected_event, tmpdir):
    """
    When a VTN or VEN receives schema compliant oadr payload that has
    logical errors, the receiving device must provide an application layer error
    indication of 4xx. The detailed error message number is informational and
    not a requirement for response to a specific scenario. If the error is in an
    event contained in an oadrDistributeEvent payload, it should be reported in
    the eventResponse element of oadrCreatedEvent. The following logical
    errors must be detected by implementations:
    VEN receives non-matching market context
    VEN receives non-matching eiTarget
    VEN receives unsupported signalName
    VTN receives non-matching eventID in oadrCreatedEvent Response
    VTN receives mismatched modificationNumber in oadrCreatedEvent
    """
    db_mock = mock.MagicMock()
    # Handler configured with explicit targeting info and one allowed
    # market context, so the parametrized events trigger logical errors.
    event_handler = event.EventHandler(
        "VEN_ID",
        market_contexts="http://market.context",
        db_path=TEST_DB_ADDR % tmpdir,
        resource_id="resource_id",
        party_id="party_id",
        group_id="group_id"
    )
    event_handler.db.update_event = db_mock
    reply = event_handler.handle_payload(generate_payload([expected_event]))
    # The top-level eiResponse stays 200 (build_created_payload hard-codes it);
    # the per-event 4xx code travels inside the eventResponse element, and the
    # rejection is visible here as optOut.
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(optType, namespaces=NS_A) == "optOut"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
@pytest.mark.skip(reason="No need to test")
def test_50_distributed_event():
    """
    VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
    In both the push and pull model, oadrDistributeEvent MUST contain all
    existing events which have the eventStatus element set to either FAR,
    NEAR, or ACTIVE. Events with an eventStatus of cancelled MUST be
    included in the payload upon change to the modificationNumber and MAY
    be included in subsequent payloads.
    """
    # Spec placeholder only; intentionally skipped (VTN-side behaviour).
    assert False
@pytest.mark.skip(reason="No need to test")
def test_52_cancellation_acknowledgment():
    """
    VTN, EiEvent Service, oadrDistributeEvent Payload
    If a VTN requests acknowledgment of a cancelled event with
    oadrResponserequired of always, the VTN shall continue to send the
    cancelled event to the VEN until the event is acknowledged, eventStatus
    transitions to the complete state, or some well defined number of retries is
    attempted
    """
    # Spec placeholder only; intentionally skipped (VTN-side behaviour).
    assert False
@pytest.mark.skip(reason="No need to test")
def test_53_http_transport():
    """
    VEN/VTN
    Shall implement the simple http transport. Including support for the
    following mandatory http headers:
    Host
    Content-Length
    Content-Type of application/xml
    """
    # Spec placeholder only; transport headers are not unit-testable here.
    assert False
@pytest.mark.skip(reason="No need to test")
def test_54_polling_frequency():
    """
    VEN
    HTTP PULL VEN’s MUST be able to guarantee worst case latencies for the
    delivery of information from the VTN by using deterministic and well defined
    polling frequencies. The VEN SHOULD support the ability for its polling
    frequency to be configured to support varying latency requirements. If the
    VEN intends to poll for information at varying frequencies based upon
    attributes of the information being exchanged (e.g. market context) then the
    VEN MUST support the configuration of polling frequencies on a per
    attribute basis.
    """
    # Spec placeholder only; the configurable minimum is covered by test_55.
    assert False
def test_55_max_polling_frequency():
    """
    VEN
    HTTP PULL VEN’s MUST NOT poll the VTN on average greater than some
    well defined and deterministic frequency. THE VEN SHOULD support the
    ability for the maximum polling frequency to be configured.
    """
    # A poll interval below the allowed minimum must be rejected at
    # construction time.
    too_fast = dict(
        event_config=dict(ven_id="TH_VEN"),
        vtn_base_uri="",
        vtn_poll_interval=9,
        start_thread=False,
    )
    with pytest.raises(AssertionError):
        OpenADR2(**too_fast)
def test_56_new_event(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    If the VTN sends an oadrEvent with an eventID that the VEN is not aware
    then it should process the event and add it to its list of known events
    """
    incoming = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING,
    )
    handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    ctrl = controller.EventController(handler)
    handler.handle_payload(generate_payload([incoming]))
    # Before dtStart the event contributes no signal.
    assert ctrl._calculate_current_event_status([incoming.to_obj()]) == (0, None, [])
    # The event must now appear in the handler's known-event list.
    assert handler.get_active_events()[0] == incoming.to_obj()
    # Once dtStart has passed the signal level becomes active.
    with freeze_time(datetime.utcnow() + timedelta(seconds=70)):
        assert ctrl._calculate_current_event_status([incoming.to_obj()]) == (1.0, "FooEvent", [])
def test_57_modified_event(tmpdir):
    """
    VEN/VTN, EiEvent Service, oadrDistributeEvent Payload
    If the VTN sends an oadrEvent with an eventID that the VEN is already
    aware of, but with a higher modification number then the VEN should
    replace the previous event with the new one In its list of known events.
    """
    evt = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING,
    )
    handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    # Deliver the original event and confirm it is stored as-is.
    handler.handle_payload(generate_payload([evt]))
    assert handler.get_active_events()[0] == evt.to_obj()
    # A re-send with a higher modification number replaces the stored copy.
    evt.mod_number = 1
    evt.status = AdrEventStatus.ACTIVE
    handler.handle_payload(generate_payload([evt]))
    assert handler.get_active_events()[0] == evt.to_obj()
def test_58_modified_event_error(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    If the VTN sends an oadrEvent with an eventID that the VEN is already
    aware of, but which has a lower modification number than one in which the
    VEN is already aware then this is an ERROR and the VEN should respond
    with the appropriate error code. Note that this is true regardless of the
    event state including cancelled.
    """
    newer = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING, mod_number=5
    )
    stale = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING, mod_number=3
    )
    handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    handler.handle_payload(generate_payload([newer]))
    expected = newer.to_obj()
    assert handler.get_active_events()[0] == expected
    # The stale payload (mod 3 < 5) must be rejected; the stored event
    # remains untouched.
    handler.handle_payload(generate_payload([stale]))
    assert handler.get_active_events()[0] == expected
def test_59_event_cancellation(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    If the VTN sends an oadrEvent with the eventStatus set to cancelled and
    has an eventID that the VEN is aware of then the VEN should cancel the
    existing event and delete it from its list of known events.
    """
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.PENDING, mod_number=1
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_controller = controller.EventController(event_handler)
    event_handler.handle_payload(generate_payload([test_event]))
    active_event = event_handler.get_active_events()[0]
    assert active_event == test_event.to_obj()
    # Freeze the clock so the cancellation end time we set below is
    # deterministic for the comparison.
    with freeze_time():
        test_event.status = AdrEventStatus.CANCELLED
        test_event.mod_number += 1
        test_event.end = datetime.utcnow()
        event_handler.handle_payload(generate_payload([test_event]))
        active_event = event_handler.get_active_events()[0]
        assert active_event == test_event.to_obj()
        # The controller reports no signal and schedules the event for removal.
        signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([test_event.to_obj()])
        assert (signal_level, evt_id, remove_events) == (0, None, ["FooEvent"])
def test_60_new_cancelled_event(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent, oadrCreatedEvent Payload
    If the VTN sends an oadrEvent with the eventStatus set to cancelled and
    has an eventID that the VEN is not aware of then the VEN should ignore
    the event since it is not currently in its list of known events, but still must
    respond with the createdEvent if required to do so by oadrResponseRequired
    """
    cancelled = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() - timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.CANCELLED, mod_number=1
    )
    handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    ctrl = controller.EventController(handler)
    reply = handler.handle_payload(generate_payload([cancelled]))
    # A createdEvent is still produced for the unknown cancelled event.
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(optType, namespaces=NS_A) == "optIn"
    # The controller immediately schedules the event for removal.
    current = handler.get_active_events()[0]
    assert ctrl._calculate_current_event_status([current]) == (0, None, ["FooEvent"])
@pytest.mark.skip(reason="Covered in other tests")
def test_61_implied_cancellation():
    """
    VEN, EiEvent Service, oadrDistributeEvent Payload
    If the VTN sends the oadrDistributeEvent payload and it does not contain
    an event for which the VEN is aware (i.e. in its list of known events) then
    the VEN must delete it from its list of known event (i.e. implied cancel).
    Exception: A VEN that has an active event that cannot be immediately
    stopped for operational reasons, may leave the event in its data store until
    the event expires or the event can be stopped.
    """
    # Spec placeholder only; implied cancellation is exercised elsewhere.
    assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_62_response():
    """
    VEN, EiEvent Service, oadrDistributeEvent, oadrCreatedEvent Payload
    The VEN must process EVERY oadrEvent event message (new, modified,
    cancelled, etc.) that it receives from the VTN in an oadrDistributeEvent
    payload and it MUST reply with a createdEvent message for every EIEvent
    message in which the responseRequired is set to always. Furthermore if
    the responseRequired is set to never, the VEN MUST NOT respond with a
    createdEvent message. It is at the complete discretion of the VTN as to
    whether responses are required from the VEN. Note that this rule is
    universal and applies to all scenarios including the following:
    The event is one in which the VEN is already aware.
    The event is being cancelled and the VEN did not even know it existed
    It does not matter how the EIEvent payloads were delivered, i.e.
    PUSH, PULL or as the result of being delivered in an ALL payload
    """
    # Spec placeholder only; response behaviour is exercised elsewhere.
    assert False
@pytest.mark.skip(reason="Covered in other tests")
def test_64_polling_cycle():
    """
    VEN, EiEvent Service
    A pull VEN shall respond to all received events before initiating another
    polling cycle.
    """
    # Spec placeholder only; intentionally skipped.
    assert False
def test_65_cancellation_time_randomization(tmpdir):
    """
    VEN, EiEvent Service, oadrDistributeEvent, oadrCreatedEvent Payload
    When an event containing a randomization value in the startafter element is
    cancelled, either explicitly or implicitly, the VEN MUST randomize its
    termination of the event. The randomization window should be between 0
    and a duration equal to the value specified in startafter.
    """
    # Already-active event with a 2-minute startafter randomization window.
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() - timedelta(minutes=5),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.ACTIVE, start_after=timedelta(minutes=2)
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_controller = controller.EventController(event_handler)
    event_handler.handle_payload(generate_payload([test_event]))
    # Freeze the clock so "utcnow" is stable while comparing against the
    # randomized termination time.
    with freeze_time():
        test_event.mod_number += 1
        test_event.status = AdrEventStatus.CANCELLED
        event_handler.handle_payload(generate_payload([test_event]))
        active_event = event_handler.get_active_events()[0]
        # The termination must have been randomized, not set to "now".
        assert active_event.end != datetime.utcnow()
        # NOTE(review): start is 5 minutes in the past, so this difference is
        # always negative and the bound is trivially satisfied; the intent was
        # probably to bound the randomized *end* time — confirm and tighten.
        assert (active_event.start - datetime.utcnow()) < timedelta(minutes=2)
@pytest.mark.skip(reason="No need to test")
def test_66_cancelled_event_handling():
    """
    VEN/VTN, EiEvent Service, oadrDistributeEvent, Payload
    If a VTN sends an oadrDistributeEvent payload containing an event with a
    startafter element with a value greater than zero, the VTN must continue to
    include the event in oadrDistributeEvent payloads, even if the event is
    complete, until current time is equal to dtStart plus duration plus startafter.
    The receipt of an eventStatus equal to completed shall not cause the VEN
    to change its operational status with respect to executing the event.
    """
    # Spec placeholder only; intentionally skipped (VTN-side behaviour).
    assert False
@pytest.mark.skip(reason="Cant test here")
def test_67_tls_support():
    """
    VEN/VTN
    VTN and VEN shall support TLS 1.0 and may support higher versions of
    TLS provided that they can still interoperate with TLS 1.0 implementations.
    The default cipher suite selection shall be as follows:
    The VEN client shall offer at least one of the default cipher
    suites listed below
    The VEN server must support at least one of the default cipher
    suites listed below and must select one of the default cipher suites
    regardless of other cipher suites that may be offered by the VTN
    client
    The VTN client must offer both the default cipher suites listed
    below.
    The VTN server must support both of the default cipher suites listed
    below and must select one of the listed default cipher suites
    regardless of other ciphers that may be offered by the VEN client
    Default cipher suites:
    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA
    TLS_RSA_WITH_AES_128_CBC_SHA
    Note that a VTN or VEN may be configured to support any TLS version and
    cipher suite combination based on the needs of a specific deployment.
    However in the absence of changes to the default configuration of the VTN
    or VEN, the behavior of the devices shall be as noted above.
    """
    # Transport-security requirement; cannot be verified in a unit test.
    assert False
@pytest.mark.skip(reason="Cant test here")
def test_68_cert_support():
    """
    VEN/VTN
    Both VTNs and VENs shall support client and server X.509v3 certificates. A
    VTN must support both an ECC and RSA certificate. A VEN must support
    either an RSA or ECC certificate and may support both. RSA certificates
    must be signed with a minimum key length of 2048 bits. ECC certificates
    must be signed with a minimum key length of 224 bits. ECC Hybrid
    certificates must be signed with a 256 bit key signed with a RSA 2048 bit
    key.
    """
    # Certificate requirement; cannot be verified in a unit test.
    assert False
|
homersoft/oadr2-ven-python | oadr2/event.py | <filename>oadr2/event.py
# Event Handler class.
# --------
# Requires the python libXML wrapper "lxml" to function properly
# pylint: disable=W1202
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>>"
import uuid
from typing import List
from lxml import etree
from lxml.builder import ElementMaker
from oadr2 import eventdb, logger
from oadr2.schemas import (NS_A, NS_B, OADR_PROFILE_20A, OADR_PROFILE_20B,
EventSchema)
__author__ = "<NAME> <<EMAIL>>, <NAME> <<EMAIL>>"
class EventHandler(object):
    '''
    The Event Handler for the project.
    Our member variables:
    --------
    ven_id -- This VEN's id
    vtn_ids -- List of ids of VTNs
    oadr_profile_level -- The profile level we have
    ns_map -- The XML namespace map we are using
    market_contexts -- List of Market Contexts
    group_id -- ID of group that VEN belongs to
    resource_id -- ID of resource in VEN we want to manipulate
    party_id -- ID of the party we are part of
    db_path -- path to db file
    '''

    def __init__(self, ven_id, vtn_ids=None, market_contexts=None,
                 group_id=None, resource_id=None, party_id=None,
                 oadr_profile_level=OADR_PROFILE_20A,
                 event_callback=None, db_path=None):
        '''
        Class constructor
        ven_id -- What is the ID of our unit
        vtn_ids -- CSV string of VTN Ids to accept
        market_contexts -- Another CSV string
        group_id -- Which group we belong to
        resource_id -- What resource we are
        party_id -- Which party are we part of
        oadr_profile_level -- What version of OpenADR 2.0 we want to use
        event_callback -- a function to call when events are updated and removed.
            The callback should have the signature `cb(updated,removed)` where
            each parameter will be passed a dict in the form `{event_id, event_etree}`
            where `oadr:oadrEvent` is the root element. You can use functions defined
            in the `event` module to pick out individual values from each event.
        '''
        # 'vtn_ids' is a CSV string of acceptable VTN IDs
        self.vtn_ids = vtn_ids
        if self.vtn_ids is not None:
            self.vtn_ids = self.vtn_ids.split(',')
        # 'market_contexts' is also a CSV string
        self.market_contexts = market_contexts
        if self.market_contexts is not None:
            self.market_contexts = self.market_contexts.split(',')
        self.group_id = group_id
        self.resource_id = resource_id
        self.party_id = party_id
        self.ven_id = ven_id
        self.event_callback = event_callback
        # the default profile is '2.0a'; do this to set the ns_map
        self.oadr_profile_level = oadr_profile_level
        if self.oadr_profile_level == OADR_PROFILE_20A:
            self.ns_map = NS_A
        elif self.oadr_profile_level == OADR_PROFILE_20B:
            self.ns_map = NS_B
        else:
            # Default/Safety, make it the 2.0a spec
            self.oadr_profile_level = OADR_PROFILE_20A
            self.ns_map = NS_A
        self.db = eventdb.DBHandler(db_path=db_path)  # TODO: add this back memdb.DBHandler()
        # IDs of events the user has explicitly opted out of
        self.optouts = set()

    def handle_payload(self, payload):
        '''
        Handle a payload. Puts Events into the handler's event list.
        payload -- An lxml.etree.Element object of oadr:oadrDistributeEvent as root node
        Returns: An lxml.etree.Element object to use as a response payload,
        or None if no event required a response
        '''
        reply_events = []
        all_events = []
        requestID = payload.findtext('pyld:requestID', namespaces=self.ns_map)
        vtnID = payload.findtext('ei:vtnID', namespaces=self.ns_map)
        # If we got a payload from a VTN that is not in our list,
        # send it a 400 message and return
        if self.vtn_ids and (vtnID not in self.vtn_ids):
            logger.warning("Unexpected VTN ID: %s, expected one of %r", vtnID, self.vtn_ids)
            return self.build_error_response(requestID, '400', 'Unknown vtnID: %s' % vtnID)
        # Loop through all of the oadr:oadrEvent 's in the payload
        for evt in payload.iterfind('oadr:oadrEvent', namespaces=self.ns_map):
            response_required = evt.findtext("oadr:oadrResponseRequired", namespaces=self.ns_map)
            evt = evt.find('ei:eiEvent', namespaces=self.ns_map)  # go to nested eiEvent
            new_event = EventSchema.from_xml(evt)
            current_signal_val = get_current_signal_value(evt, self.ns_map)
            logger.debug(
                f'------ EVENT ID: {new_event.id}({new_event.mod_number}); '
                f'Status: {new_event.status}; Current Signal: {current_signal_val}'
            )
            all_events.append(new_event.id)
            old_event = self.db.get_event(new_event.id)
            # For the events we need to reply to, make our "opts," and check the status of the event
            # By default, we optIn and have an "OK," status (200)
            opt = 'optIn'
            status = '200'
            # A lower modification number than the stored one is a logical
            # error: reject with 403.
            if old_event and (old_event.mod_number > new_event.mod_number):
                logger.warning(
                    f"Got a smaller modification number "
                    f"({new_event.mod_number} < {old_event.mod_number}) for event {new_event.id}"
                )
                status = '403'
                opt = 'optOut'
            # Reject events that target some other resource/party/group/VEN.
            if not self.check_target_info(new_event):
                logger.info(f"Opting out of event {new_event.id} - no target match")
                status = '403'
                opt = 'optOut'
            # A user-initiated opt-out is not an error, so keep status 200.
            if new_event.id in self.optouts:
                logger.info(f"Opting out of event {new_event.id} - user opted out")
                status = '200'
                opt = 'optOut'
            # We only understand events carrying a simple signal.
            if not new_event.signals:
                logger.info(f"Opting out of event {new_event.id} - no simple signal")
                opt = 'optOut'
                status = '403'
            # Reject events for market contexts we are not configured for.
            if self.market_contexts and (new_event.market_context not in self.market_contexts):
                logger.info(
                    f"Opting out of event {new_event.id}:"
                    f"market context {new_event.market_context} does not match"
                )
                opt = 'optOut'
                status = '405'
            if response_required == 'always':
                reply_events.append((new_event.id, new_event.mod_number, requestID, opt, status))
            # We have a new event or an updated old one
            if opt == "optIn":
                if old_event and (old_event.mod_number < new_event.mod_number):
                    # A higher modification number replaces the stored event
                    if new_event.status == "cancelled":
                        if new_event.status != old_event.status:
                            # First cancellation: randomize the end time
                            # (per the event's startafter window)
                            new_event.cancel(random_end=True)
                        else:
                            # Re-sent cancellation: plain cancel
                            new_event.cancel()
                    self.db.update_event(new_event)
                if not old_event:
                    if new_event.status == "cancelled":
                        # Unknown-but-cancelled event: store it already
                        # cancelled so we can still acknowledge it
                        new_event.cancel()
                    self.db.add_event(new_event)
        # Find implicitly cancelled events (known events missing from the
        # payload) and get rid of them
        for evt in self.get_active_events():
            if evt.id not in all_events:
                logger.debug(f'Mark event {evt.id} as cancelled')
                evt.cancel()
                self.db.update_event(evt)
        # If we have any in the reply_events list, build some payloads
        logger.debug("Replying for events %r", reply_events)
        reply = None
        if reply_events:
            reply = self.build_created_payload(reply_events)
        return reply

    def build_request_payload(self):
        '''
        Assemble an XML payload to request an event from the VTN.
        Returns: An lxml.etree.Element object
        '''
        oadr = ElementMaker(namespace=self.ns_map['oadr'], nsmap=self.ns_map)
        pyld = ElementMaker(namespace=self.ns_map['pyld'], nsmap=self.ns_map)
        ei = ElementMaker(namespace=self.ns_map['ei'], nsmap=self.ns_map)
        emix = ElementMaker(namespace=self.ns_map['emix'], nsmap=self.ns_map)
        payload = oadr.oadrRequestEvent(
            pyld.eiRequestEvent(
                pyld.requestID(str(uuid.uuid4())),
                ei.venID(self.ven_id),
                pyld.replyLimit('99')
            )
        )
        return payload

    def build_created_payload(self, events):
        '''
        Assemble an XML payload to send out for events marked "response
        required."
        events -- List of tuples with the following structure:
            (Event ID, Modification Number, Request ID, Opt, Status)
        Returns: An lxml.etree.Element object
        '''
        # Setup the element makers
        oadr = ElementMaker(namespace=self.ns_map['oadr'], nsmap=self.ns_map)
        pyld = ElementMaker(namespace=self.ns_map['pyld'], nsmap=self.ns_map)
        ei = ElementMaker(namespace=self.ns_map['ei'], nsmap=self.ns_map)

        def responses(events):
            # One ei:eventResponse per replied-to event, echoing its requestID
            for e_id, mod_num, requestID, opt, status in events:
                yield ei.eventResponse(
                    ei.responseCode(str(status)),
                    pyld.requestID(requestID),
                    ei.qualifiedEventID(
                        ei.eventID(e_id),
                        ei.modificationNumber(str(mod_num))),
                    ei.optType(opt))

        # Top-level eiResponse is always 200 with an empty requestID; the
        # per-event status lives in each eventResponse (OpenADR 2.0a rule 42).
        payload = oadr.oadrCreatedEvent(
            pyld.eiCreatedEvent(
                ei.eiResponse(
                    ei.responseCode('200'),
                    pyld.requestID()),
                ei.eventResponses(*list(responses(events))),
                ei.venID(self.ven_id)))
        logger.debug("Created payload:\n%s",
                     etree.tostring(payload, pretty_print=True))
        return payload

    def build_error_response(self, request_id, code, description=None):
        '''
        Assemble the XML for an error response payload.
        request_id -- Request ID of offending payload
        code -- The HTTP Error Code Status we want to use
        description -- An extra note on what was not acceptable (currently unused)
        Returns: An lxml.etree.Element object containing the payload
        '''
        oadr = ElementMaker(namespace=self.ns_map['oadr'], nsmap=self.ns_map)
        pyld = ElementMaker(namespace=self.ns_map['pyld'], nsmap=self.ns_map)
        ei = ElementMaker(namespace=self.ns_map['ei'], nsmap=self.ns_map)
        payload = oadr.oadrCreatedEvent(
            pyld.eiCreatedEvent(
                ei.eiResponse(
                    ei.responseCode(code),
                    pyld.requestID(request_id)),
                ei.venID(self.ven_id)))
        logger.debug("Error payload:\n%s",
                     etree.tostring(payload, pretty_print=True))
        return payload

    def check_target_info(self, evt: EventSchema):
        '''
        Checks to see if we have been targeted by the event.
        evt -- EventSchema object
        Returns: True if we are in the target info (or the event has no
        targeting at all), False otherwise.
        '''
        # An event with no target info at all applies to everyone.
        accept = True
        if evt.party_ids or evt.group_ids or evt.resource_ids or evt.ven_ids:
            # Targeted event: accept only if one of our IDs matches.
            accept = False
            if evt.party_ids and self.party_id in evt.party_ids:
                accept = True
            if evt.group_ids and self.group_id in evt.group_ids:
                accept = True
            if evt.resource_ids and self.resource_id in evt.resource_ids:
                accept = True
            if evt.ven_ids and self.ven_id in evt.ven_ids:
                accept = True
        return accept

    def fill_event_target_info(self, evt: EventSchema) -> EventSchema:
        '''Stamp this VEN's own target IDs onto the event (in place).'''
        evt.group_ids = [self.group_id] if self.group_id else None
        evt.resource_ids = [self.resource_id] if self.resource_id else None
        evt.party_ids = [self.party_id] if self.party_id else None
        evt.ven_ids = [self.ven_id] if self.ven_id else None
        return evt

    def get_active_events(self) -> List[EventSchema]:
        '''
        Get all active events, excluding any the user has opted out of.
        Return: A list of EventSchema objects with this VEN's target
        info filled in.
        '''
        active = self.db.get_active_events()
        # Fill in target info, then filter out opted-out events.
        # (The previous implementation popped from `active` while iterating
        # it with enumerate, which skips the element following each removal
        # and can pop the wrong index after the first one.)
        result = []
        for evt in active:
            evt = self.fill_event_target_info(evt)
            if evt.id not in self.optouts:
                result.append(evt)
        return result

    def remove_events(self, evt_id_list):
        '''
        Remove a list of events from our event store.
        evt_id_list -- List of Event IDs
        '''
        self.db.remove_events(evt_id_list)
        # Forget any opt-outs for events that no longer exist.
        for evt in evt_id_list:
            self.optouts.discard(evt)

    def optout_event(self, e_id):
        '''
        Opt out of an event by its ID.
        :param e_id: ID of the event we want to opt out of
        :return:
        '''
        if e_id not in [event.id for event in self.db.get_active_events()]:
            return  # opt-out of a non-existing event is a no-op
        self.optouts.add(e_id)

    def update_active_status(self, event_id):
        '''
        Transition the given event from a pending ("near"/"far") status to
        "active" in the event store.
        :param event_id:
        :return:
        '''
        event = self.db.get_event(event_id)
        if event and event.status in ["near", "far"]:
            event.status = "active"
            self.db.update_event(event)
def get_current_signal_value(evt, ns_map=NS_A):
    '''
    Look up the current simple-signal value of an event.
    evt -- lxml.etree.Element object
    ns_map -- Dictionary of namespaces for OpenADR 2.0; default is the 2.0a spec
    Returns: the text of the ei:value element, or None if it is absent
    '''
    xpath = ('ei:eiEventSignals/ei:eiEventSignal/'
             'ei:currentValue/ei:payloadFloat/ei:value')
    return evt.findtext(xpath, namespaces=ns_map)
|
homersoft/oadr2-ven-python | test/schedule_unittest.py | import datetime as dt
import unittest
from dateutil.relativedelta import relativedelta
from oadr2 import schedule
class ScheduleTest(unittest.TestCase):
def test_parse_duration(self):
self.assertEqual(('+', 0, 0, 0, 0, 3, 0), schedule.parse_duration('PT3M'))
self.assertEqual(('+', 0, 0, 0, 0, 3, 0), schedule.parse_duration('+PT3M'))
self.assertEqual(('+', 1, 0, 0, 0, 3, 0), schedule.parse_duration('+P1YT3M'))
self.assertEqual(('+', 0, 0, 0, 0, 3, 0), schedule.parse_duration('P0YT3M'))
self.assertEqual(('+', 0, 0, 0, 0, 0, 30), schedule.parse_duration('P0Y0M0DT0H0M30S'))
self.assertEqual(('+', 0, 0, 12, 5, 15, 23), schedule.parse_duration('P12DT5H15M23S'))
self.assertEqual(('-', 0, 0, 0, 2, 0, 0), schedule.parse_duration('-PT2H'))
self.assertEqual(('+', 0, 0, 12, 0, 0, 0), schedule.parse_duration('P12D'))
def test_parse_duration_to_delta(self):
self.assertEqual(
(relativedelta(minutes=3), '+'),
schedule.duration_to_delta('PT3M'))
self.assertEqual(
(relativedelta(minutes=3), '+'),
schedule.duration_to_delta('+PT3M'))
self.assertEqual(
(relativedelta(years=1, minutes=5), '+'),
schedule.duration_to_delta('+P1YT5M'))
self.assertEqual(
(relativedelta(seconds=55), '+'),
schedule.duration_to_delta('P0YT55S'))
self.assertEqual(
(relativedelta(seconds=30), '+'),
schedule.duration_to_delta('P0Y0M0DT0H0M30S'))
self.assertEqual(
(relativedelta(days=12, hours=5, minutes=15, seconds=23), '+'),
schedule.duration_to_delta('P12DT5H15M23S'))
self.assertEqual(
(relativedelta(hours=2), '-'),
schedule.duration_to_delta('-PT2H'))
self.assertEqual(
(relativedelta(days=12), '+'),
schedule.duration_to_delta('P12D'))
def test_str_to_dttm(self):
self.assertEqual(
dt.datetime(2013, 5, 12, 8, 33, 50),
schedule.str_to_datetime('2013-05-12T08:33:50Z'))
def test_dttm_to_str(self):
self.assertEqual('2013-05-12T08:33:50Z',
schedule.dttm_to_str(dt.datetime(2013, 5, 12, 8, 33, 50), include_msec=False))
def test_random_offset(self):
start = dt.datetime(2013, 5, 12, 8, 33, 50)
dttm = schedule.random_offset(
start, None, None)
self.assertEqual(start, dttm)
dttm = schedule.random_offset(
start, 'PT3M', None)
print(("new dttm: %s" % dttm))
min_dttm = dt.datetime(2013, 5, 12, 8, 30, 50)
self.assertTrue(min_dttm <= dttm < start)
dttm2 = schedule.random_offset(
start, 'PT1H', 'PT3D12M')
print(("new dttm: %s" % dttm2))
self.assertNotEqual(dttm, dttm2)
min_dttm = dt.datetime(2013, 5, 12, 7, 33, 50)
max_dttm = dt.datetime(2013, 5, 15, 7, 45, 50)
self.assertTrue(min_dttm <= dttm < max_dttm)
def test_choose_interval(self):
    """choose_interval() should return the index of the interval active at a
    given time, -1 before the event starts, and None after the last interval."""
    start = dt.datetime(2013, 5, 12, 8, 30, 50)
    intervals = ('PT5M', 'PT30S', 'PT12H')
    cases = [
        # before event start
        (-1, dt.datetime(2013, 5, 12, 8, 22, 0)),
        # first interval (5 minutes)
        (0, dt.datetime(2013, 5, 12, 8, 30, 50)),
        (0, dt.datetime(2013, 5, 12, 8, 30, 51)),
        # second interval (30 seconds)
        (1, dt.datetime(2013, 5, 12, 8, 35, 50)),
        (1, dt.datetime(2013, 5, 12, 8, 36, 19)),
        # third interval (12 hours)
        (2, dt.datetime(2013, 5, 12, 8, 36, 20)),
        (2, dt.datetime(2013, 5, 12, 20, 36, 19)),
        # after the last interval
        (None, dt.datetime(2013, 5, 12, 20, 36, 20)),
    ]
    for expected, now in cases:
        self.assertEqual(expected, schedule.choose_interval(start, intervals, now))
# Allow running this test module directly (python test_schedule.py).
if __name__ == '__main__':
    unittest.main()
|
homersoft/oadr2-ven-python | test/adr_event_generator.py | <filename>test/adr_event_generator.py
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Dict, List, Optional, Union
from lxml import etree
from oadr2.schemas import EventSchema, SignalSchema
def format_duration(duration: Union[timedelta, None]) -> str:
    """Render a timedelta as an xcal/ISO-8601 duration string.

    Returns "PT0M" for ``None`` or a zero duration, otherwise the
    "P0Y0M{d}DT{h}H{m}M{s}S" form used in the oadr payload templates below.
    """
    if not duration:
        return "PT0M"
    # Fix: the previous version only looked at .seconds, silently dropping the
    # .days component (timedelta(days=1) rendered as a zero duration).
    hours, remainder = divmod(duration.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"P0Y0M{duration.days}DT{hours}H{minutes}M{seconds}S"
def format_datetime(time: datetime) -> str:
    """Render a (naive, assumed-UTC) datetime as ISO-8601 with a trailing 'Z'."""
    return time.isoformat() + "Z"
class AdrEventStatus(Enum):
    """Event status values as they appear in eventDescriptor/eventStatus."""
    PENDING = "near"  # scheduled but not yet active ("near" on the wire)
    ACTIVE = "active"
    CANCELLED = "cancelled"
    COMPLETED = "completed"
@dataclass
class AdrInterval:
    """A single signal interval of a test event."""
    index: int           # 0-based position within the event's signal list
    level: float         # signal payload value for this interval
    duration: timedelta  # how long the interval lasts

    def to_xml(self):
        """Render this interval as an ei:interval XML fragment (string)."""
        return f"""
        <ei:interval>
            <ical:duration>
                <ical:duration>{format_duration(self.duration)}</ical:duration>
            </ical:duration>
            <ical:uid>
                <ical:text>{self.index}</ical:text>
            </ical:uid>
            <ei:signalPayload>
                <ei:payloadFloat>
                    <ei:value>{self.level}</ei:value>
                </ei:payloadFloat>
            </ei:signalPayload>
        </ei:interval>"""
class AdrEvent:
    """In-memory OpenADR event used to build test payloads.

    Can render itself either as the project's EventSchema (``to_obj``) or as
    an ``oadrEvent`` XML fragment (``to_xml``) for ``generate_payload``.
    """

    def __init__(
        self,
        id: Union[str, None],
        start: datetime,
        signals: List[Dict[str, Union[float, int, timedelta]]],
        status: AdrEventStatus,
        mod_number: Optional[int] = 0,
        end: Optional[datetime] = None,
        start_before: Optional[timedelta] = None,
        start_after: Optional[timedelta] = None,
        original_start: datetime = None,
        cancellation_offset: timedelta = None,
        group_ids: Optional[List[str]] = None,
        resource_ids: Optional[List[str]] = None,
        party_ids: Optional[List[str]] = None,
        ven_ids: Optional[List[str]] = None,
        vtn_id: Optional[str] = "TH_VTN",
        market_context: Optional[str] = "http://market.context",
        test_event: bool = False,
        priority: int = 1,
        response_required: bool = True,
        signal_name: str = "simple"
    ):
        self.id = id
        self.start = start
        self.original_start = original_start or start
        self.cancellation_offset = cancellation_offset
        self.raw_signals = [signal for signal in signals]
        self.signals = signals
        self.intervals = [AdrInterval(**signal) for signal in self.signals]
        # Total event duration is the sum of all interval durations.
        self.duration = timedelta()
        for signal in self.signals:
            self.duration += signal["duration"]
        self.end = end or self.start + self.duration
        self.group_ids = group_ids
        self.resource_ids = resource_ids
        self.party_ids = party_ids
        # Fix: the previous default was the mutable list ['VEN_ID'], shared by
        # every instance constructed without the argument.  A None sentinel
        # keeps the same effective default (explicit lists, including [],
        # are passed through unchanged).
        self.ven_ids = ['VEN_ID'] if ven_ids is None else ven_ids
        self.mod_number = mod_number
        self.status = status
        self.start_before = start_before
        self.start_after = start_after
        self.vtn_id = vtn_id
        self.market_context = market_context
        self.created_date = datetime(2020, 1, 1, 10, 10)
        self.test_event = test_event
        self.priority = priority
        self.response_required = response_required
        self.signal_name = signal_name

    def to_obj(self):
        """Build the EventSchema equivalent of this event (durations re-serialized)."""
        _signals = [
            dict(
                index=s["index"],
                level=s["level"],
                duration=format_duration(s["duration"])
            ) for s in self.signals
        ]
        return EventSchema(
            id=self.id,
            vtn_id=self.vtn_id,
            mod_number=self.mod_number,
            start=self.start,
            original_start=self.original_start,
            end=self.end,
            signals=[SignalSchema(**signal) for signal in _signals],
            status=self.status.value,
            cancellation_offset=format_duration(self.cancellation_offset) if self.cancellation_offset else None,
            ven_ids=self.ven_ids,
            # NOTE(review): "market_market_context" looks like a typo for
            # "market_context" — confirm against EventSchema's field names.
            market_market_context=self.market_context,
            group_ids=self.group_ids,
            resource_ids=self.resource_ids,
            party_ids=self.party_ids,
            test_event=self.test_event,
            priority=self.priority
        )

    def to_xml(self):
        """Render this event as an oadrEvent XML fragment (string)."""
        intervals_xml = "".join([interval.to_xml() for interval in self.intervals])
        # Optional blocks are rendered only when the corresponding field is set.
        start_after = f"""<ical:tolerance>
            <ical:tolerate>
                <ical:startafter>{format_duration(self.start_after)}</ical:startafter>
            </ical:tolerate>
        </ical:tolerance>""" if self.start_after else ""
        ven_xml = f"<ei:venID>{','.join(self.ven_ids)}</ei:venID>" if self.ven_ids else ""
        group_xml = f"<ei:groupID>{','.join(self.group_ids)}</ei:groupID>" if self.group_ids else ""
        resource_xml = f"<ei:resourceID>{','.join(self.resource_ids)}</ei:resourceID>" if self.resource_ids else ""
        party_xml = f"<ei:partyID>{','.join(self.party_ids)}</ei:partyID>" if self.party_ids else ""
        return f"""
    <oadrEvent>
        <ei:eiEvent>
            <ei:eventDescriptor>
                <ei:eventID>{self.id}</ei:eventID>
                <ei:modificationNumber>{self.mod_number}</ei:modificationNumber>
                <ei:priority>{self.priority}</ei:priority>
                <ei:eiMarketContext>
                    <emix:marketContext>{self.market_context}</emix:marketContext>
                </ei:eiMarketContext>
                <ei:createdDateTime>{format_datetime(self.created_date)}</ei:createdDateTime>
                <ei:eventStatus>{self.status.value}</ei:eventStatus>
                <ei:testEvent>{self.test_event}</ei:testEvent>
                <ei:vtnComment></ei:vtnComment>
            </ei:eventDescriptor>
            <ei:eiActivePeriod>
                <ical:properties>
                    <ical:dtstart>
                        <ical:date-time>{format_datetime(self.start)}</ical:date-time>
                    </ical:dtstart>
                    <ical:duration>
                        <ical:duration>{format_duration(self.duration)}</ical:duration>
                    </ical:duration>
                    {start_after}
                    <ei:x-eiNotification>
                        <ical:duration>P0Y0M0DT0H0M0S</ical:duration>
                    </ei:x-eiNotification>
                </ical:properties>
                <ical:components xsi:nil="true"/>
            </ei:eiActivePeriod>
            <ei:eiEventSignals>
                <ei:eiEventSignal>
                    <strm:intervals>
                        {intervals_xml}
                    </strm:intervals>
                    <ei:signalName>{self.signal_name}</ei:signalName>
                    <ei:signalType>level</ei:signalType>
                    <ei:signalID>SignalID</ei:signalID>
                    <ei:currentValue>
                        <ei:payloadFloat>
                            <ei:value>0.0</ei:value>
                        </ei:payloadFloat>
                    </ei:currentValue>
                </ei:eiEventSignal>
            </ei:eiEventSignals>
            <ei:eiTarget>
                {ven_xml}
                {party_xml}
                {resource_xml}
                {group_xml}
            </ei:eiTarget>
        </ei:eiEvent>
        <oadrResponseRequired>{"always" if self.response_required else "never"}</oadrResponseRequired>
    </oadrEvent>
    """
def generate_payload(event_list, vtn_id="TH_VTN"):
    """Wrap the given AdrEvents in an oadrDistributeEvent and parse it with lxml.

    Returns the parsed root element, ready to feed to EventHandler.handle_payload().
    """
    events_xml = "".join(evt.to_xml() for evt in event_list)
    template = f"""
    <oadrDistributeEvent
        xmlns="http://openadr.org/oadr-2.0a/2012/07"
        xmlns:ei="http://docs.oasis-open.org/ns/energyinterop/201110"
        xmlns:emix="http://docs.oasis-open.org/ns/emix/2011/06"
        xmlns:pyld="http://docs.oasis-open.org/ns/energyinterop/201110/payloads"
        xmlns:strm="urn:ietf:params:xml:ns:icalendar-2.0:stream"
        xmlns:ical="urn:ietf:params:xml:ns:icalendar-2.0"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    >
        <eiResponse>
            <responseCode>200</responseCode>
            <pyld:requestID/>
        </eiResponse>
        <pyld:requestID>OadrDisReq092520_152645_178</pyld:requestID>
        <ei:vtnID>{vtn_id}</ei:vtnID>
        {events_xml}
    </oadrDistributeEvent>
    """
    return etree.fromstring(template)
|
homersoft/oadr2-ven-python | oadr2/__init__.py | <filename>oadr2/__init__.py
import logging
class IdFilter:
    """Logging filter that stamps every record with a client identifier.

    Attach to a handler/logger so formatters can reference %(client_id)s.
    """

    def __init__(self, client_id=None):
        self.client_id = client_id

    def filter(self, record):
        """Annotate *record* with the configured client_id; never drops records."""
        record.client_id = self.client_id
        return True
# Package-level logger shared by the oadr2 modules.
logger = logging.getLogger("oadr2")
|
homersoft/oadr2-ven-python | oadr2/eventdb.py | <gh_stars>1-10
from datetime import datetime
from typing import Dict, List, Optional, Sequence, Union
from sqlalchemy import (Boolean, Column, Float, ForeignKey, Integer, String,
create_engine)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, relationship, sessionmaker
from oadr2.schemas import EventSchema
Base = declarative_base()
class Signal(Base):
    """One signal interval belonging to an event (composite PK: event_id + index)."""
    __tablename__ = "signals"

    event_id = Column(String, ForeignKey("events.id"), primary_key=True)
    index = Column(Integer, primary_key=True)  # position within the event's signal list
    duration = Column(String)  # ISO-8601 duration string
    level = Column(Float)
class Event(Base):
    """Persisted OpenADR event.

    Datetimes and signals are stored in string/relation columns (prefixed
    with "_") and exposed as Python objects through the properties below,
    so callers read/write ``start``/``end``/``signals`` directly.
    """
    __tablename__ = "events"

    id = Column(String, primary_key=True, index=True, unique=True)
    mod_number = Column(Integer, nullable=False, default=0)
    _start = Column(String)           # ISO-8601 string backing the `start` property
    _original_start = Column(String)  # ISO-8601 string backing `original_start`
    _end = Column(String)             # ISO-8601 string backing `end` (nullable)
    _signals = relationship("Signal", cascade="all,delete")
    cancellation_offset = Column(String)  # ISO-8601 duration, may be NULL
    status = Column(String)
    priority = Column(Integer)
    test_event = Column(Boolean)

    @property
    def start(self) -> datetime:
        """Event start time, parsed from the stored ISO string."""
        return datetime.fromisoformat(self._start)

    @start.setter
    def start(self, value: datetime) -> None:
        self._start = value.isoformat()

    @property
    def original_start(self) -> datetime:
        """Start time as originally scheduled (before any randomized offset)."""
        return datetime.fromisoformat(self._original_start)

    @original_start.setter
    def original_start(self, value: datetime) -> None:
        self._original_start = value.isoformat()

    @property
    def end(self) -> Union[datetime, None]:
        """Event end time, or None if no end is stored."""
        return datetime.fromisoformat(self._end) if self._end else None

    @end.setter
    def end(self, value: Union[datetime, None]) -> None:
        self._end = value.isoformat() if value else None

    @property
    def signals(self) -> List[Dict[str, Union[float, int, str]]]:
        """Signal rows as plain dicts (duration kept as its ISO string)."""
        return [
            dict(
                duration=signal.duration,
                index=signal.index,
                level=signal.level
            ) for signal in self._signals
        ]

    @signals.setter
    def signals(self, value: List[Dict[str, Union[float, int, str]]]) -> None:
        # Replaces the full set of child Signal rows for this event.
        self._signals = [
            Signal(
                event_id=self.id,
                index=signal["index"],
                duration=signal["duration"],
                level=signal["level"]
            ) for signal in value
        ]
class DBHandler:
    """Thin SQLite/SQLAlchemy persistence layer for OpenADR events."""

    def __init__(self, db_path: str):
        engine = create_engine(f"sqlite:///{db_path}")
        # NOTE(review): autocommit sessions are deprecated in SQLAlchemy 1.4+;
        # confirm the pinned SQLAlchemy version supports this.
        self.session: Session = sessionmaker(bind=engine, autocommit=True)()
        Event.metadata.create_all(engine)
        # Subset of EventSchema fields that map onto Event columns/properties.
        self.accepted_params = {"id", "mod_number", "start", "original_start", "end", "signals",
                                "cancellation_offset", "status", "priority", "test_event"}

    def get_active_events(self) -> List[EventSchema]:
        """Return all stored events as schemas, ordered by start time."""
        return sorted(
            [
                EventSchema.from_orm(evt) for evt in self.session.query(Event).all()
            ], key=lambda evt: evt.start
        )

    def update_event(self, event: EventSchema) -> None:
        """Replace any stored event with the same id by this one."""
        self.remove_events([event.id])
        self.add_event(event)

    def add_event(self, event: EventSchema) -> None:
        """Insert a new event row (plus its signal rows via the property setter)."""
        db_item = Event(**event.dict(include=self.accepted_params))
        self.session.add(db_item)

    def get_event(self, event_id: str) -> Optional[EventSchema]:
        """Look up a single event by id; None when absent."""
        evt = self.session.query(Event).filter_by(id=event_id).first()
        return EventSchema.from_orm(evt) if evt else None

    def remove_events(self, event_ids: Sequence[str]) -> None:
        """Delete the given events and their signal rows; missing ids are ignored."""
        for event_id in event_ids:
            self.session.query(Event).filter_by(id=event_id).delete()
            self.session.query(Signal).filter_by(event_id=event_id).delete()
|
homersoft/oadr2-ven-python | test/test_event_processing.py | from datetime import datetime, timedelta
from test.adr_event_generator import AdrEvent, AdrEventStatus, generate_payload
from unittest import mock
import pytest
from freezegun import freeze_time
from oadr2 import controller, event
from oadr2.schemas import NS_A
# Path template for the per-test sqlite database (filled with pytest's tmpdir).
TEST_DB_ADDR = "%s/test2.db"

# Shared event fixtures keyed by scenario name.
# NOTE(review): the datetimes are evaluated once at import time, so every
# scenario is relative to module-load time, not to each test's start.
scenario = dict(
    not_started=AdrEvent(
        id="FooEvent",
        start=datetime.utcnow()+timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
        status=AdrEventStatus.PENDING,
    ),
    started=AdrEvent(
        id="FooEvent",
        start=datetime.utcnow()-timedelta(seconds=5),
        signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
        status=AdrEventStatus.ACTIVE,
    ),
    not_started_with_target=AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() + timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
        status=AdrEventStatus.PENDING, group_ids=["ids"], party_ids=["ids"], resource_ids=["ids"],
    ),
    # Active event currently inside its first of two signal intervals.
    signal_1of2=AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() - timedelta(seconds=10),
        signals=[
            dict(index=0, duration=timedelta(seconds=15), level=1.0),
            dict(index=1, duration=timedelta(seconds=5), level=2.0),
        ],
        status=AdrEventStatus.ACTIVE,
    ),
    # Active event currently inside its second of two signal intervals.
    signal_2of2=AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() - timedelta(seconds=10),
        signals=[
            dict(index=0, duration=timedelta(seconds=5), level=1.0),
            dict(index=1, duration=timedelta(seconds=15), level=2.0),
        ],
        status=AdrEventStatus.ACTIVE,
    ),
    # Two overlapping events where the first is the one currently active.
    events_1of2=[
        AdrEvent(
            id="FooEvent1",
            start=datetime.utcnow() - timedelta(seconds=10),
            signals=[
                dict(index=0, duration=timedelta(seconds=20), level=1.0),
            ],
            status=AdrEventStatus.ACTIVE,
        ),
        AdrEvent(
            id="FooEvent2",
            start=datetime.utcnow() + timedelta(seconds=10),
            signals=[
                dict(index=0, duration=timedelta(seconds=20), level=2.0),
            ],
            status=AdrEventStatus.PENDING,
        ),
    ],
    # Two events where the first already completed and should be removed.
    events_2of2=[
        AdrEvent(
            id="FooEvent1",
            start=datetime.utcnow() - timedelta(seconds=10),
            signals=[
                dict(index=0, duration=timedelta(seconds=5), level=1.0),
            ],
            status=AdrEventStatus.COMPLETED,
        ),
        AdrEvent(
            id="FooEvent2",
            start=datetime.utcnow() - timedelta(seconds=5),
            signals=[
                dict(index=0, duration=timedelta(seconds=20), level=2.0),
            ],
            status=AdrEventStatus.PENDING,
        ),
    ],
    # Cancelled with an end already in the past -> eligible for removal.
    cancelled=AdrEvent(
        id="FooEvent",
        start=datetime.utcnow()-timedelta(seconds=60),
        signals=[
            dict(index=0, duration=timedelta(seconds=120), level=1.0)
        ],
        status=AdrEventStatus.CANCELLED,
        end=datetime.utcnow()-timedelta(seconds=10)
    ),
    # Cancelled but its (randomized) end is still in the future -> stays active.
    cancelled_still_active=AdrEvent(
        id="FooEvent",
        start=datetime.utcnow()-timedelta(seconds=60),
        signals=[
            dict(index=0, duration=timedelta(seconds=120), level=1.0)
        ],
        status=AdrEventStatus.CANCELLED,
        end=datetime.utcnow()+timedelta(seconds=10)
    )
)
@pytest.mark.parametrize(
    "event_list, expected",
    [
        pytest.param(
            [scenario["not_started"]],
            (0, None, []),
            id="event not started"
        ),
        pytest.param(
            [scenario["not_started_with_target"]],
            (0, None, []),
            # NOTE(review): this id duplicates the previous case's id;
            # consider "event not started with target".
            id="event not started"
        ),
        pytest.param(
            [scenario["started"]],
            (1.0, 'FooEvent', []),
            id="event started"
        ),
        pytest.param(
            [scenario["signal_1of2"]],
            (1.0, 'FooEvent', []),
            id="event started, signal 1 of 2"
        ),
        pytest.param(
            [scenario["signal_2of2"]],
            (2.0, 'FooEvent', []),
            id="event started, signal 2 of 2"
        ),
        pytest.param(
            scenario["events_1of2"],
            (1.0, 'FooEvent1', []),
            id="event 1 of 2"
        ),
        pytest.param(
            scenario["events_2of2"],
            (2.0, 'FooEvent2', ["FooEvent1"]),
            id="event 2 of 2, first deleted"
        ),
        pytest.param(
            [scenario["cancelled"]],
            (0, None, ["FooEvent"]),
            id="event cancelled"
        ),
        pytest.param(
            [scenario["cancelled_still_active"]],
            (1.0, "FooEvent", []),
            id="event cancelled but not deleted"
        ),
    ]
)
def test_calculate_current_event_status(event_list, expected, tmpdir):
    """_calculate_current_event_status() should yield (signal level, active event id, ids to remove)."""
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_controller = controller.EventController(event_handler)
    signal_level, evt_id, remove_events = event_controller._calculate_current_event_status([evt.to_obj() for evt in event_list])
    assert (signal_level, evt_id, remove_events) == expected
@pytest.mark.parametrize(
    "event_list, expected_level, expected_removed",
    [
        pytest.param(
            [scenario["not_started"]], 0, [],
            id="event not started"
        ),
        pytest.param(
            [scenario["started"]], 1.0, [],
            id="event started"
        ),
        pytest.param(
            [scenario["signal_1of2"]], 1.0, [],
            id="event started, signal 1 of 2"
        ),
        pytest.param(
            [scenario["signal_2of2"]], 2.0, [],
            id="event started, signal 2 of 2"
        ),
        pytest.param(
            scenario["events_1of2"], 1.0, [],
            id="event 1 of 2"
        ),
        pytest.param(
            scenario["events_2of2"], 2.0, ["FooEvent1"],
            id="event 2 of 2, first deleted"
        ),
        pytest.param(
            [scenario["cancelled"]], 0, ["FooEvent"],
            id="event cancelled"
        ),
        pytest.param(
            [scenario["cancelled_still_active"]], 1.0, [],
            id="event cancelled but not deleted"
        ),
    ]
)
def test_calculate_update_control(event_list, expected_level, expected_removed, tmpdir):
    """_update_control() should apply the current signal level and prune finished events from the DB."""
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.db.remove_events = db_mock
    event_controller = controller.EventController(event_handler)
    signal_level = event_controller._update_control([evt.to_obj() for evt in event_list])
    assert signal_level == expected_level
    if expected_removed:
        # remove_events() gets one list argument; check it matches exactly
        # (order-insensitive) by draining the expected ids out of it.
        parsed_events = db_mock.call_args[0][0]
        for evt in expected_removed:
            assert evt in parsed_events
            parsed_events.remove(evt)
        assert parsed_events == []
    else:
        db_mock.assert_not_called()
# XPath fragments (relative to the oadrCreatedEvent reply root) used by the
# handle_payload assertions below.
responseCode = 'pyld:eiCreatedEvent/ei:eiResponse/ei:responseCode'
requestID = 'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/pyld:requestID'
optType = 'pyld:eiCreatedEvent/ei:eventResponses/ei:eventResponse/ei:optType'
venID = 'pyld:eiCreatedEvent/ei:venID'
@pytest.mark.parametrize(
    "event_list",
    [
        pytest.param(
            [scenario["not_started"]],
            id="event not started"
        ),
        pytest.param(
            [scenario["started"]],
            id="event started"
        ),
        pytest.param(
            [scenario["signal_1of2"]],
            id="event started, signal 1 of 2"
        ),
        pytest.param(
            [scenario["signal_2of2"]],
            id="event started, signal 2 of 2"
        ),
        pytest.param(
            [scenario["cancelled"]],
            id="event cancelled"
        ),
        pytest.param(
            [scenario["cancelled_still_active"]],
            id="event cancelled but not deleted"
        ),
    ]
)
def test_handle_payload(event_list, tmpdir):
    """handle_payload() should opt in, reply with the request id, and store each event."""
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.db.add_event = db_mock
    reply = event_handler.handle_payload(generate_payload(event_list))
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(optType, namespaces=NS_A) == "optIn"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
    db_mock.assert_called_once()
    for index, evt in enumerate(event_list):
        parsed_event = db_mock.call_args[0][index]
        expected_event = evt.to_obj()
        assert parsed_event.id == expected_event.id
        assert parsed_event.start == expected_event.start
        assert parsed_event.original_start == expected_event.original_start
        assert parsed_event.cancellation_offset == expected_event.cancellation_offset
        assert parsed_event.signals == expected_event.signals
        assert parsed_event.mod_number == expected_event.mod_number
        assert parsed_event.status == expected_event.status
        # Cancelled events get a handler-assigned end, so skip the end check there.
        if expected_event.status != AdrEventStatus.CANCELLED.value:
            assert parsed_event.end == expected_event.end
@pytest.mark.parametrize(
    "expected_event, handler_param",
    [
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, resource_ids=["some_parameter"]
            ),
            dict(resource_id="some_parameter"),
            id="resource_id"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, party_ids=["some_parameter"]
            ),
            dict(party_id="some_parameter"),
            id="party_id"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, group_ids=["some_parameter"]
            ),
            dict(group_id="some_parameter"),
            id="group_id"
        ),
    ]
)
def test_handle_payload_with_target_info(expected_event, handler_param, tmpdir):
    """Events whose eiTarget matches the handler's configured target should be accepted."""
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir, **handler_param)
    event_handler.db.add_event = db_mock
    reply = event_handler.handle_payload(generate_payload([expected_event]))
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(optType, namespaces=NS_A) == "optIn"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
    db_mock.assert_called_once()
    parsed_event = db_mock.call_args[0][0]
    expected_event = expected_event.to_obj()
    assert parsed_event.id == expected_event.id
    assert parsed_event.start == expected_event.start
    assert parsed_event.original_start == expected_event.original_start
    assert parsed_event.cancellation_offset == expected_event.cancellation_offset
    assert parsed_event.signals == expected_event.signals
    assert parsed_event.mod_number == expected_event.mod_number
    assert parsed_event.status == expected_event.status
    assert parsed_event.end == expected_event.end
@pytest.mark.parametrize(
    "expected_event, handler_param",
    [
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, resource_ids=["some_parameter"], ven_ids=[]
            ),
            dict(resource_id="other_parameter"),
            id="resource_id"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, party_ids=["some_parameter"], ven_ids=[]
            ),
            dict(party_id="other_parameter"),
            id="party_id"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(seconds=10), level=1.0)],
                status=AdrEventStatus.PENDING, group_ids=["some_parameter"], ven_ids=[]
            ),
            dict(group_id="other_parameter"),
            id="group_id"
        ),
    ]
)
def test_handle_payload_with_wrong_target_info(expected_event, handler_param, tmpdir):
    """Events targeting a different resource/party/group should be opted out and not stored."""
    db_mock = mock.MagicMock()
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir, **handler_param)
    event_handler.db.update_event = db_mock
    reply = event_handler.handle_payload(generate_payload([expected_event]))
    assert reply.findtext(responseCode, namespaces=NS_A) == "200"
    assert reply.findtext(requestID, namespaces=NS_A) == "OadrDisReq092520_152645_178"
    assert reply.findtext(optType, namespaces=NS_A) == "optOut"
    assert reply.findtext(venID, namespaces=NS_A) == "VEN_ID"
    db_mock.assert_not_called()
@pytest.mark.parametrize(
    "event_list",
    [
        pytest.param(
            [scenario["not_started"]],
            id="event not started"
        ),
        pytest.param(
            [scenario["started"]],
            id="event started"
        ),
        pytest.param(
            [scenario["signal_1of2"]],
            id="event started, signal 1 of 2"
        ),
        pytest.param(
            [scenario["signal_2of2"]],
            id="event started, signal 2 of 2"
        ),
    ]
)
def test_handle_payload_with_db(event_list, tmpdir):
    """End-to-end: events passed through handle_payload() should be persisted
    and come back from get_active_events()."""
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    # Fix: the reply was bound to an unused local (`reply = ...`); only the
    # side effect of handling the payload matters here.
    event_handler.handle_payload(generate_payload(event_list))
    active_events = event_handler.get_active_events()
    for evt in event_list:
        assert evt.to_obj() in active_events
@pytest.mark.parametrize(
    "event_list",
    [
        pytest.param(
            [scenario["cancelled"]],
            id="event cancelled"
        ),
        pytest.param(
            [scenario["cancelled_still_active"]],
            id="event cancelled but not deleted"
        ),
    ]
)
def test_handle_cancelled_payload_with_db(event_list, tmpdir):
    """Cancelled events stay in the DB but get a handler-assigned end time."""
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.handle_payload(generate_payload(event_list))
    active_event = event_handler.get_active_events()[0]
    expected_event = event_list[0].to_obj()
    # The handler recomputes the end on cancellation, so the ends differ;
    # null them out and compare everything else.
    assert active_event.end != expected_event.end
    active_event.end = expected_event.end = None
    assert active_event == expected_event
@pytest.mark.parametrize(
    "test_event",
    [
        pytest.param(
            scenario["not_started"],
            id="event not started"
        ),
    ]
)
def test_update_with_db(test_event, tmpdir):
    """Re-sending an event with a higher mod_number should replace the stored copy."""
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.handle_payload(generate_payload([test_event]))
    active_events = event_handler.get_active_events()
    assert test_event.to_obj() in active_events
    # Bump the modification number and flip status, as a VTN update would.
    test_event.mod_number += 1
    test_event.status = AdrEventStatus.ACTIVE
    event_handler.handle_payload(generate_payload([test_event]))
    active_events = event_handler.get_active_events()
    assert test_event.to_obj() in active_events
def test_implied_cancellation(tmpdir):
    """A payload that omits a previously-seen event should implicitly cancel it."""
    event1 = AdrEvent(
        id="FooEvent1",
        start=datetime.utcnow()-timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=1.0)],
        status=AdrEventStatus.ACTIVE,
    )
    event2 = AdrEvent(
        id="FooEvent2",
        start=datetime.utcnow()-timedelta(seconds=50),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=2.0)],
        status=AdrEventStatus.ACTIVE,
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.handle_payload(generate_payload([event1]))
    active_events = event_handler.get_active_events()
    assert [event1.to_obj()] == active_events
    # Freeze time so the implied-cancellation end stamp matches cancel() below.
    with freeze_time():
        event_handler.handle_payload(generate_payload([event2]))
        active_events = event_handler.get_active_events()
        cancelled_evt = event1.to_obj()
        cancelled_evt.cancel()
        assert [cancelled_evt, event2.to_obj()] == active_events
def test_explicite_cancellation(tmpdir):
    """An explicit cancellation payload should stick, also when re-delivered."""
    # NOTE(review): function name misspells "explicit"; kept as-is since pytest
    # selects by this name.
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow()-timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=2.0)],
        status=AdrEventStatus.ACTIVE,
    )
    event_handler = event.EventHandler("VEN_ID", db_path=TEST_DB_ADDR % tmpdir)
    event_handler.handle_payload(generate_payload([test_event]))
    active_events = event_handler.get_active_events()
    assert [test_event.to_obj()] == active_events
    # Freeze time so the cancellation end stamp stays comparable.
    with freeze_time():
        test_event.mod_number += 1
        test_event.status = AdrEventStatus.CANCELLED
        test_event.end = datetime.utcnow()
        event_handler.handle_payload(generate_payload([test_event]))
        active_events = event_handler.get_active_events()
        assert [test_event.to_obj()] == active_events
        # test second cancellation
        event_handler.handle_payload(generate_payload([test_event]))
        active_events = event_handler.get_active_events()
        assert [test_event.to_obj()] == active_events
        # test subsequent cancellation
        event_handler.handle_payload(generate_payload([test_event]))
        active_events = event_handler.get_active_events()
        assert [test_event.to_obj()] == active_events
@pytest.mark.parametrize(
    "test_event",
    [
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() - timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(minutes=10), level=2.0)],
                status=AdrEventStatus.ACTIVE,
            ).to_obj(),
            id="event active"
        ),
        pytest.param(
            AdrEvent(
                id="FooEvent",
                start=datetime.utcnow() + timedelta(seconds=60),
                signals=[dict(index=0, duration=timedelta(minutes=10), level=2.0)],
                status=AdrEventStatus.PENDING,
            ).to_obj(),
            id="event nt started"
        ),
    ]
)
def test_event_object_cancellation(test_event):
    """EventSchema.cancel() should set status/end once and be idempotent."""
    with freeze_time():
        test_event.cancel()
        cancellation_time = datetime.utcnow()
        assert test_event.status == AdrEventStatus.CANCELLED.value
        assert test_event.end == cancellation_time
        # A second cancel() must not move the end time.
        test_event.cancel()
        assert test_event.end == cancellation_time
def test_event_object_random_cancellation():
    """With a cancellation_offset, cancel() should pick a randomized end inside
    the offset window and keep it on repeated cancellations."""
    test_event = AdrEvent(
        id="FooEvent",
        start=datetime.utcnow() - timedelta(seconds=60),
        signals=[dict(index=0, duration=timedelta(minutes=10), level=2.0)],
        status=AdrEventStatus.ACTIVE, cancellation_offset=timedelta(minutes=1)
    ).to_obj()
    now = datetime.utcnow()
    with freeze_time(now):
        test_event.cancel()
        cancellation_time = datetime.utcnow()
        assert test_event.status == AdrEventStatus.CANCELLED.value
        assert cancellation_time < test_event.end < cancellation_time + timedelta(minutes=1)
    # Cancelling again later must not re-roll the randomized end.
    with freeze_time(now + timedelta(seconds=30)):
        test_event.cancel()
        assert cancellation_time < test_event.end < cancellation_time + timedelta(minutes=1)
|
homersoft/oadr2-ven-python | oadr2/xmpp.py | # Classes for sending/receiving OpenADR 2.0 messages via XMPP
# --------
# Requires the python libXML wrapper "lxml" to function properly
# Please note that the standard python xml library is needed as well
__author__ = '<NAME> <<EMAIL>>, <NAME> <<EMAIL>>'
import logging
from io import StringIO
from xml.etree import cElementTree as std_ElementTree
from xml.etree.cElementTree import XML as std_XML
import sleekxmpp
# NOTE: As stated in header, we are using two different XML libraries.
# The python standard XML library is needed because of SleekXMPP
# Yet we try to use the "lxml," module as much as we can.
from lxml import etree as lxml_etree
from sleekxmpp.exceptions import XMPPError
from sleekxmpp.plugins.base import base_plugin
from sleekxmpp.stanza.iq import Iq
from oadr2 import base, event
class OpenADR2(base.BaseHandler):
'''
xmpp.OpenADR2 is the XMPP equivalent of poll.OpenADR2. It will wait for an
oadrDistributeEvent IQ stanza from the XMPP server and then generate a
response IQ and send it to the server.
Memeber variables
--------
(Everything from base.BaseHandler class)
xmpp_client - a sleekxmpp.ClientXMPP object, which will intercept the OpenADR2 stuff for us
user - JID of user for the VEN
password - Password for accompanying JID
server_addr - Address of the XMPP Server
server_port - Port we should connect to
'''
def __init__(self, event_config, user, password, server_addr='localhost', server_port=5222):
'''
Initilize what will do XMPP magic for us
**poll_config -- A dictionary of Keyord arguemnts for the base class (poll.OpenADR2)
user -- JID of whom we want to login to as on the XMPP Server
password - Password for corresponding JID
server_addr -- Address of where the XMPP server is located
server_port -- Port that the XMPP server is listening on
'''
base.BaseHandler.__init__(self, event_config)
# Make sure we set these variables before calling the parent class' constructor
self.xmpp_client = None
self.user = user
self.password = password
self.server_addr = server_addr
self.server_port = int(server_port)
self._init_client(start_thread=True)
def _init_client(self, start_thread):
'''
Setup/Start the client. The base class has a function of the same name,
which is also called in its constructor.
start_thread -- Right now this variable is unused, but is here so it
not conflict with its parent's function declaration.
'''
# Setup the XMPP Client that we are going to be using
self.xmpp_client = sleekxmpp.ClientXMPP(self.user, self.password)
self.xmpp_client.add_event_handler('session_start', self.xmpp_session_start)
self.xmpp_client.add_event_handler('message', self.xmpp_message)
self.xmpp_client.register_plugin('xep_0030')
self.xmpp_client.register_plugin('xep_0199',
pconfig={'keepalive': True, 'frequency': 240})
self.xmpp_client.register_plugin('OpenADR2Plugin',
module='oadr2.xmpp',
pconfig={'callback': self._handle_oadr_payload})
# Setup system information disco
self.xmpp_client['xep_0030'].add_identity(
category='system',
itype='version',
name='OpenADR2 Python VEN')
# Connect and thread the client
self.xmpp_client.connect((self.server_addr, self.server_port))
self.xmpp_client.process(threaded=True)
def xmpp_session_start(self, event):
'''
'session_start' event handler for our XMPP Client. Will just send our
presence.
event -- An empty dictionary. Parameter is just here because of
SleekXMPP requirements.
'''
logging.info('XMPP session has started.')
self.xmpp_client.sendPresence()
def xmpp_message(self, msg):
'''
'message' event handler for our XMPP Client.
NOTE: OpenADR 2.0 XMPP does not use Message stanzas at all, so we should
never receive one, but the handler is here to print it out just in case.
msg -- The Message.
'''
logging.info(msg)
def _handle_oadr_payload(self, msg):
'''
Handle OpenADR2 payloads
msg - A type of OADR2Message
'''
# Try to generate a response payload and send it back
try:
response = self.event_handler.handle_payload(msg.payload)
logging.debug('Response Payload:\n%s\n----\n',
lxml_etree.tostring(response, pretty_print=True))
self.send_reply(response, msg.from_)
except Exception as ex:
logging.exception("Error processing OADR2 log request: %s", ex)
def send_reply(self, payload, to):
'''
Make and OADR2 Message and sends it to someone (if they are online)
payload - The body of the IQ stanza, i.e. the OpenADR xml stuff
(lxml.etree.Element object)
to - The JID of whom the messge will go to
'''
# And send it if we are connected
if self.xmpp_client.state.current_state() != 'connected':
logging.error('Not connected, cannot send response')
return
# Build the IQ reply and send it
iq_reply = Iq(self.xmpp_client, sto=to, stype='set')
# Change the lxml object to a standard Python XML object
iq_reply.set_payload(std_XML(lxml_etree.tostring(payload)))
self.xmpp_client.send(iq_reply)
def exit(self):
    '''
    Shutdown the module and client.
    '''
    # Shutdown the xmpp client: announce unavailability first (only possible
    # while connected), then disconnect and signal the client thread to stop.
    logging.info('Shutting down the XMPP Client...')
    if self.xmpp_client.state.current_state() == 'connected':
        self.xmpp_client.send_presence(pstatus='unavailable')
    self.xmpp_client.disconnect()
    self.xmpp_client.stop.set()  # tell the client's processing thread to end
    self.xmpp_client = None
    logging.info('XMPP Client shutdown.')
    base.BaseHandler.exit(self)  # Stop the parent threads
class OADR2Message(object):
    '''
    Message for OADR2 payload.

    Member Variables:
    --------
    payload -- An XML payload (lxml.etree.Element)
    id -- An ID from the IQ stanza
    from_ -- Whom it is from
    stanza_type -- What type of stanza it was
    iq_type -- What type of IQ it was (typically 'set' or 'result')
    oadr_profile_level -- What version of OpenADR 2.0 we are using
                          (either 2.0a or 2.0b)
    ns_map -- The namespaces for the corresponding oadr_profile_level
    '''

    def __init__(self, payload=None,
                 id_=None, stanza_type='iq', iq_type='result',
                 from_=None, to=None,
                 oadr_profile_level=event.OADR_PROFILE_20A):
        '''
        Initialize the message.

        payload -- What data we want to send (an lxml object)
        id_ -- ID of the stanza
        stanza_type -- What type of stanza (should be 'iq')
        iq_type -- What type of IQ
        from_ -- JID of who sent it
        to -- Whom it should go to
        oadr_profile_level -- What version of OpenADR 2.0 we should be using.
                              Should come from the event module, either
                              event.OADR_PROFILE_20A or event.OADR_PROFILE_20B
        '''
        self.payload = payload
        self.id = id_
        self.from_ = from_
        self.stanza_type = stanza_type
        self.iq_type = iq_type
        self.oadr_profile_level = oadr_profile_level

        # Set the namespace map dependent upon the profile level.
        if self.oadr_profile_level == event.OADR_PROFILE_20A:
            self.ns_map = event.NS_A
        elif self.oadr_profile_level == event.OADR_PROFILE_20B:
            self.ns_map = event.NS_B
        else:
            # Default/Safety: fall back to the 2.0a spec.
            # BUG FIX: was a bare `OADR_PROFILE_20A`, which raised NameError;
            # the constant lives in the `event` module.
            self.oadr_profile_level = event.OADR_PROFILE_20A
            self.ns_map = event.NS_A

    def get_events(self):
        '''
        Get the events from the payload.

        Returns: all of the events as lxml objects
        '''
        # BUG FIX: the first placeholder was mangled as "%{(oadr)s}", which
        # raises ValueError ("unsupported format character") under
        # %-formatting; it must be "{%(oadr)s}" like every other lookup here.
        return self.payload.findall("{%(oadr)s}oadrEvent/{%(ei)s}eiEvent" % self.ns_map)

    def get_status(self, event):
        '''
        Get the status of an event from the payload.

        event -- event element we are looking at (NOTE: this parameter
                 shadows the module-level `event` import inside this method)

        Returns: the status text of the event
        '''
        return event.findtext("{%(ei)s}eventDescriptor/{%(ei)s}eventStatus" % self.ns_map)

    def get_evt_id(self, event):
        '''
        Get the ID of an event.

        event -- Event element we are looking at.

        Returns: the eventID text
        '''
        return event.findtext("{%(ei)s}eventDescriptor/{%(ei)s}eventID" % self.ns_map)

    def get_mod_num(self, event):
        '''
        Get the modification number of an event.

        event -- Event element we are looking at.

        Returns: the modificationNumber text
        '''
        return event.findtext("{%(ei)s}eventDescriptor/{%(ei)s}modificationNumber" % self.ns_map)

    def get_current_signal_level(self, event):
        '''
        Get the current signal level of an event.

        event -- Event element we are looking at.

        Returns: the current signal value text
        '''
        return event.findtext(('{%(ei)s}eiEventSignals/{%(ei)s}eiEventSignal/' + \
            '{%(ei)s}currentValue/{%(ei)s}payloadFloat/{%(ei)s}value') % self.ns_map)

    def to_xml(self):
        '''
        Get the message's payload as XML.

        Returns: a list containing one XML string of the payload
                 (does not include IQ tags), or an empty list if there
                 is no payload.
        '''
        data = []
        buffer = StringIO()
        if self.payload is not None:
            buffer.write(lxml_etree.tostring(self.payload))
            data.append(buffer.getvalue())
        return data
class OpenADR2Plugin(base_plugin):
    '''
    OpenADR 2.0 XMPP handler plugin.

    Member Variables:
    --------
    All from SleekXMPP's "base_plugin" class
    callback -- Function that will be called with each incoming OADR2Message
    '''

    def plugin_init(self):
        '''
        Initialize the plugin. (SleekXMPP calls this; it is not __init__.)
        '''
        self.xep = 'OADR2'
        self.description = 'OpenADR 2.0 XMPP Plugin'
        self.xmpp.add_handler(
            "<iq type='set'><oadrDistributeEvent xmlns='%s' /></iq>" % event.OADR_XMLNS_A,
            self._handle_iq)
        self.callback = self.config.get('callback')

    def _handle_iq(self, iq):
        '''
        Handle an IQ stanza with a payload containing an "oadrDistributeEvent"
        tag. This will pass an OADR2Message to 'self.callback'.

        iq -- A SleekXMPP Iq object.
        '''
        # BUG FIX: the original passed one tuple as a single argument to a
        # format string with two %s placeholders, which makes the logging
        # module report a formatting error; pass the values separately.
        logging.debug('OpenADR2 payload [from=%s, to=%s]',
            iq.get('from'), iq.get('to'))
        try:
            # Convert a "Standard Python Library XML object" to one from lxml.
            payload_element = lxml_etree.XML(std_ElementTree.tostring(iq[0]))
            msg = OADR2Message(
                iq_type=iq.get('type'),
                id_=iq.get('id'),
                from_=iq.get('from'),
                payload=payload_element
            )
            # And pass it to the message handler.
            self.callback(msg)
        except Exception as e:
            logging.exception("OADR2 XMPP parse error: %s", e)
            # Stringify the exception explicitly; XMPPError expects text.
            raise XMPPError(text=str(e))
|
homersoft/oadr2-ven-python | oadr2/database.py | <filename>oadr2/database.py
# A small handler/abstraciton layer for SQLite database connections
import logging
import sqlite3

# Default location of the SQLite database file.
DEFAULT_DB_PATH = 'oadr2.db'


class DBHandler(object):
    '''
    A small handler/abstraction layer for SQLite database connections.

    The following functions are used by EventHandler (event.py):
        get_active_events(), update_all_events(), update_event(),
        get_event(), remove_events()
    '''

    def __init__(self, db_path=DEFAULT_DB_PATH):
        '''
        Initialize the handler and make sure the schema exists.

        db_path -- Path to where the database is located

        Raises ValueError if db_path is empty or None.
        '''
        self.db_path = db_path
        self.init_database()

    def init_database(self):
        '''
        Build the database, only if it doesn't already exist,
        with the tables we want in it.
        '''
        # BUG FIX: the original test `if self.db_path is None or '':` could
        # never catch an empty string, because a bare '' is falsy on its own.
        if not self.db_path:
            raise ValueError("Database path cannot be empty")

        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        # Verify whether the table already exists:
        c.execute("pragma table_info('event')")
        if c.fetchone() is not None:
            logging.debug('Database `%s` is setup.', self.db_path)
            # BUG FIX: the original returned here without closing the cursor
            # and connection (the finally below only guards the try block).
            c.close()
            conn.close()
            return  # table exists.

        try:
            # NOTE timestamps are SECONDS as a float, not milliseconds.
            # see: http://wiki.python.org/moin/WorkingWithTime
            c.executescript('''
                PRAGMA foreign_keys = ON;

                CREATE TABLE event (
                    id INTEGER PRIMARY KEY,
                    vtn_id VARCHAR NOT NULL,
                    event_id VARCHAR NOT NULL,
                    mod_num INT NOT NULL DEFAULT 0,
                    raw_xml TEXT NOT NULL
                );

                CREATE UNIQUE INDEX idx_event_vtn_id ON event (
                    vtn_id, event_id
                );
                ''')
            conn.commit()
            logging.debug("Created tables for database %s", self.db_path)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; behavior otherwise unchanged (log, roll back).
            logging.exception("Error creating tables for database %s", self.db_path)
            conn.rollback()
        finally:
            c.close()
            conn.close()

    # === EventHandler related functions ===

    def get_active_events(self):
        '''
        Get the active events from the database.

        Returns: an empty dictionary, or a dictionary following the pattern:
                 dict['event_id'] = '<xml>blob_for_event</xml>'
        '''
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        try:
            c.execute('SELECT event_id, raw_xml FROM event')
            # key= event_id, val= xml blob
            return {_id: blob for _id, blob in c.fetchall()}
        except Exception as ex:
            logging.exception('Error getting active events! %s', ex)
            raise
        finally:
            c.close()
            conn.close()

    def update_all_events(self, records):
        '''
        Clear out the current event table and insert the given records.

        records -- A list of tuples with the following format:
                   ('vtn_id', 'event_id', MOD_NUM(integer),
                    '<xml>for_event</xml>')
        '''
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        try:
            # Clear out the event table
            c.execute('DELETE FROM event')
            logging.debug('Wiped the event table to update all of the events')

            # Insert the new records into the database
            c.executemany('''INSERT INTO event(vtn_id, event_id, mod_num, raw_xml)
                             VALUES(?, ?, ?, ?)''', records)
            logging.debug('Inserted the new events into the database')
            conn.commit()
        except Exception as ex:
            logging.error('Error updating events! %s', ex)
            conn.rollback()
            raise
        finally:
            c.close()
            conn.close()

    def update_event(self, e_id, mod_num, raw_xml, vtn_id):
        '''
        Update an existing event, or insert a new one.

        e_id -- EventID we want to insert
        mod_num -- Current modification number of event (must be an integer)
        raw_xml -- Raw XML data for event
        vtn_id -- ID of issuing VTN
        '''
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        try:
            # Insert it into the database (or update it); REPLACE relies on
            # the unique (vtn_id, event_id) index created in init_database().
            c.execute('''REPLACE INTO event(vtn_id, event_id, mod_num, raw_xml)
                         VALUES(?, ?, ?, ?)''', (vtn_id, e_id, mod_num, raw_xml))
            conn.commit()
            logging.debug('Inserted/updated event_id [%s]', e_id)
        except Exception as ex:
            logging.error('Error updating event ID [%s]: %s', e_id, ex)
            conn.rollback()
            raise
        finally:
            c.close()
            conn.close()

    def get_event(self, event_id):
        '''
        Get an event's raw XML.

        event_id -- ID of the event

        Returns: None if not found, or the xml blob
        '''
        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()
        try:
            # Run the SELECT and see if we got a result
            c.execute('SELECT raw_xml FROM event WHERE event_id=?', (event_id,))
            row = c.fetchone()
            return row[0] if row else None
        except Exception as ex:
            logging.error('Error getting event ID [%s]: %s', event_id, ex)
            raise
        finally:
            c.close()
            conn.close()

    def remove_events(self, event_ids):
        '''
        Remove a list of events.

        event_ids -- List of event IDs

        Returns: the number of affected rows, or None if event_ids is empty.
        '''
        # Exit if we don't have any EventIDs
        if not event_ids:
            return

        conn = sqlite3.connect(self.db_path)
        c = conn.cursor()

        # BUG FIX: build the parameter tuples in a new list; the original
        # rewrote the caller's list in place, mutating their data.
        params = [(event_id,) for event_id in event_ids]

        # Delete all of the events
        try:
            c.executemany('DELETE FROM event WHERE event_id=?', params)
            logging.debug('Removed events from database.')
            conn.commit()
            return c.rowcount
        except Exception as ex:
            logging.error('Error deleting events: %s', ex)
            conn.rollback()
            raise
        finally:
            c.close()
            conn.close()
|
homersoft/oadr2-ven-python | oadr2/memdb.py | from typing import Dict, NamedTuple, Optional, Sequence, Tuple
# One stored event: issuing VTN, modification counter, and the raw event XML.
EventEntry = NamedTuple("EventEntry", (("vtn_id", str), ("mod_num", int), ("raw_xml", str)))


class DBHandler:
    """
    In-memory OADR2 protocol state backing store based on Python dict.

    Mirrors the interface of the SQLite-backed DBHandler in database.py.
    """

    def __init__(self, db_path=""):
        # db_path is accepted (and ignored) only for interface parity with
        # the SQLite-backed DBHandler.
        self.events = {}  # type: Dict[int, EventEntry]

    def init_database(self):
        """No schema to create for the in-memory store."""
        pass

    def get_active_events(self) -> Dict[int, str]:
        """Return a mapping of event id -> raw event XML."""
        return {_id: e.raw_xml
                for _id, e in self.events.items()}

    def update_all_events(self, records: Sequence[Tuple[str, str, int, str]]) -> None:
        """Replace the whole store with (vtn_id, event_id, mod_num, raw_xml) rows."""
        self.events = {event_id: EventEntry(vtn_id, mod_num, raw_xml)
                       for vtn_id, event_id, mod_num, raw_xml in records}

    def update_event(self, e_id: int, mod_num: int, raw_xml: str, vtn_id: str) -> None:
        """Insert or overwrite a single event entry."""
        self.events[e_id] = EventEntry(vtn_id, mod_num, raw_xml)

    def get_event(self, event_id: int) -> Optional[str]:
        """Return the raw XML for event_id, or None if unknown."""
        # PERF FIX: the original scanned every entry with a generator; a
        # plain dict lookup is O(1) and behaves identically.
        entry = self.events.get(event_id)
        return entry.raw_xml if entry is not None else None

    def remove_events(self, event_ids: Sequence[int]) -> None:
        """Drop the given ids; unknown ids are ignored."""
        for _id in event_ids:
            self.events.pop(_id, None)
|
homersoft/oadr2-ven-python | oadr2/schedule.py | '''
This module handles scheduling for OpenADR2 entities,
e.g. event schedules, price schedules, etc.
'''
__author__ = '<NAME> <EMAIL>'
import calendar
import datetime
import random
import re
# import logging
from dateutil.relativedelta import relativedelta
DB_PATH = 'oadr2.db'
# ISO-8601 duration pattern: optional sign, then P[nY][nM][nD][T[nH][nM][nS]].
DURATION_PAT = r'([+-])?P(?:(\d+)Y)?(?:(\d+)M)?(?:(\d+)D)?T?(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?'
DURATION_REX = re.compile(DURATION_PAT)


def parse_duration(dur_str):
    '''
    Parse a duration string as defined by ISO-8601:
    http://en.wikipedia.org/wiki/ISO_8601#Durations

    Returns a tuple `(sign, years, months, days, hours, minutes, seconds)`.
    Any omitted increment comes back as `0`; an omitted sign defaults to '+'.

    Example:
        `parse_duration('P15DT5H20S')` -> `('+', 0, 0, 15, 5, 0, 20)`
    '''
    sign, *fields = DURATION_REX.match(dur_str).groups()
    amounts = tuple(0 if field is None else int(field) for field in fields)
    return (sign or '+',) + amounts
def choose_interval(start, interval_list, now=None):
    '''
    Given a list of durations, find the duration that 'now' falls into.

    start -- datetime.datetime when the event starts
    interval_list -- list of ical duration strings (e.g. 'PT1M')
    now -- datetime to test against; defaults to datetime.utcnow()

    Returns the index into `interval_list` whose interval contains 'now',
    -1 if the event has not started yet, or `None` if the last interval
    ends before 'now' (the event has probably ended).
    '''
    if now is None: now = datetime.datetime.utcnow()

    # (removed an unused `total_time` local left over from older code)
    interval_start_list = durations_to_dates(
            start, interval_list)
#    logging.debug('All interval starts: %r', interval_start_list)

    current_interval_end = None
    # idiom: iterate boundaries directly instead of indexing by range(len()).
    for i, new_interval_end in enumerate(interval_start_list):
        if new_interval_end > now:
            # if the new interval is > now, we are in the interval prior.
            # But if the prior interval is index 0, it means the event hasn't
            # started yet, in which case return value will = -1
            return i - 1

        if new_interval_end == current_interval_end:
            # means there was a 0 duration, which is a special case meaning
            # 'unending' - this interval will always include 'now'
            return i - 1

        # else look at next interval:
        current_interval_end = new_interval_end

    # the last interval still did not reach 'now',
    # which probably means the event has ended.
    return None
def duration_to_delta(duration_str):
    '''
    Take a duration string like 'PT5M' or 'P0Y0M1DT3H2M1S'
    and convert it to a dateutil relativedelta.

    Returns - a 2-tuple containing (delta, sign) where sign is
              either '+' or '-'
    '''
    sign, years, months, days, hours, minutes, seconds = parse_duration(duration_str)
    delta = relativedelta(
            years=years,
            months=months,
            days=days,
            hours=hours,
            minutes=minutes,
            seconds=seconds)
    return delta, sign
def durations_to_dates(start, dur_list):
    '''
    Expand a start datetime plus a list of durations into the list of
    cumulative interval-boundary datetimes.

    @param `start` a datetime.datetime instance when the event should start
    @param `dur_list` a list of ical duration strings like `PT1M`
    @return a list of datetimes `[start, start+d0, start+d0+d1, ...]`
            (the original docstring claimed a single date; a list is returned)
    @raise ValueError if `start` is not a datetime
    '''
    if not isinstance(start, datetime.datetime):
        raise ValueError('start must be a datetime object')

    new_dttm = start
    new_list = [start, ]
    # idiom: iterate the durations directly instead of range(len(...)).
    for dur in dur_list:
        delta, sign = duration_to_delta(dur)
        new_dttm = new_dttm + delta if sign == '+' else new_dttm - delta
        new_list.append(new_dttm)

    return new_list
def str_to_datetime(dt_str):
    '''Parse a UTC timestamp string ending in 'Z', with or without
    fractional seconds, into a naive datetime.'''
    if '.' in dt_str:
        fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
    else:
        fmt = '%Y-%m-%dT%H:%M:%SZ'
    return datetime.datetime.strptime(dt_str, fmt)
def dttm_to_str(dttm, include_msec=True):
    '''Format a datetime as a UTC timestamp string ending in 'Z',
    optionally dropping the fractional-seconds part.'''
    if include_msec:
        return dttm.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    return dttm.strftime('%Y-%m-%dT%H:%M:%SZ')
def random_offset(dttm, start_before, start_after):
    '''
    Given a start datetime, and a start_before and start_after duration,
    pick a random start time in [dttm - start_before, dttm + start_after].
    '''
    if not start_before and not start_after:
        # No randomization window configured: start exactly on time.
        return dttm

    if start_before:
        min_dttm = dttm - duration_to_delta(start_before)[0]
    else:
        min_dttm = dttm
    if start_after:
        max_dttm = dttm + duration_to_delta(start_after)[0]
    else:
        max_dttm = dttm

    lo = int(calendar.timegm(min_dttm.utctimetuple()))
    hi = int(calendar.timegm(max_dttm.utctimetuple()))
    return datetime.datetime.utcfromtimestamp(random.randint(lo, hi))
|
Joshinn-io/augur | augur/__init__.py | <reponame>Joshinn-io/augur
#SPDX-License-Identifier: MIT
import os
# Absolute path to the repository root: two directory levels up from this file.
ROOT_AUGUR_DIRECTORY = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
Joshinn-io/augur | workers/facade_worker/facade_worker/excel_generators/example.py | <gh_stars>100-1000
#!/usr/bin/python3
# Copyright 2017-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# Create summary Excel file
#
# This script creates a formatted Excel file for easier use in reports. It can
# also be used as a template for generating other types of Excel files. Main
# places to be modified when creating a derivative script are marked with #-->
import sys
import MySQLdb
import imp
import time
import datetime
import xlsxwriter
import os
dirname = os.path.dirname
filepath = os.path.abspath(__file__)
sys.path.append(dirname(dirname(filepath)))
# Pull the shared MySQL connection/cursor from the local db.py config module.
try:
    imp.find_module('db')
    from db import db,cursor
except ImportError:
    # Narrowed from a bare `except:`: only a missing/unimportable db.py should
    # produce this friendly message; any other error should surface with a
    # normal traceback instead of being masked.
    sys.exit("Can't find db.py. Have you created it?")
def get_setting(setting):
    '''Return the most recent value of `setting` from the settings table.'''
    # SECURITY/idiom fix: use a DB-API parameterized query instead of
    # interpolating `setting` straight into the SQL string.
    query = ("SELECT value FROM settings WHERE setting=%s ORDER BY "
        "last_modified DESC LIMIT 1")
    cursor.execute(query, (setting,))
    return cursor.fetchone()["value"]
### The real program starts here ###
# Builds one worksheet per year; columns are projects, rows are affiliations,
# each cell shows "LoC added (unique emails)" from project_annual_cache.

#--> Set your filename
filename = 'facade_summary-projects_by_LoC_and_number_contributors_by_year.xlsx'

#--> Set the description of the data
detail = 'LoC added (Unique emails)'

#--> Change this to modify the names of each worksheet
# One sheet per year, newest first, from the configured start_date's year
# through the current year.
sheets = reversed(list(range(int(get_setting('start_date')[:4]),
    datetime.datetime.now().year + 1)))

#--> Change this to modify the x axis
get_x_axis = "SELECT name,id FROM projects"

cursor.execute(get_x_axis)
x_axis = list(cursor)

# Output goes into the 'files' directory three levels above this script.
facade_dir = dirname(dirname(dirname(filepath)))
outfile = os.path.join(facade_dir,'files',filename)

workbook = xlsxwriter.Workbook(outfile)
bold = workbook.add_format({'bold': True})
italic = workbook.add_format({'italic': True})
bold_italic = workbook.add_format({'bold': True, 'italic': True})
numformat = workbook.add_format({'num_format': '#,##0'})

for sheet in sheets:
    worksheet = workbook.add_worksheet(str(sheet))
    # Header block: provenance and data format description.
    worksheet.write(1,1,'Report generated on %s by Facade' %
        time.strftime('%Y-%m-%d'),bold)
    worksheet.write(2,1,'https://github.com/brianwarner/facade')
    worksheet.write(3,1,'Format: %s' % detail)

    top_row = 5
    first_col = 1

    # Write the project names across the top row.
    col = first_col + 1
    for x in x_axis:
        #--> Change the value of x[''] to match SELECT statement
        worksheet.write(top_row,col,x['name'],bold_italic)
        col += 1

    #--> Change this to modify the y axis
    get_y_axis = ("SELECT DISTINCT affiliation FROM project_annual_cache "
        "WHERE year = %s "
        "ORDER BY affiliation ASC"
        % sheet)

    cursor.execute(get_y_axis)
    y_axis = list(cursor)

    row = top_row + 1
    for y in y_axis:
        #--> Change the value of y[''] to match SELECT statement
        worksheet.write(row,first_col,y['affiliation'],bold)
        col = first_col + 1
        for x in x_axis:
            #--> Change this to modify the data
            # NOTE(review): the affiliation value is escaped by hand and
            # interpolated into the SQL; a parameterized query would be
            # safer -- confirm before changing, since FORMAT() output is
            # consumed as pre-formatted strings below.
            get_stats = ("SELECT FORMAT(SUM(added),0) AS added, "
                "FORMAT(COUNT(email),0) AS emails "
                "FROM project_annual_cache "
                "WHERE affiliation = '%s' "
                "AND projects_id = %s "
                "AND year = %s"
                % (y['affiliation'].replace("'","\\'"),
                x['id'], sheet))

            cursor.execute(get_stats)
            stats = list(cursor)

            for stat in stats:
                #--> Change this to define the format for each data point
                # Only write a cell when there is any added-LoC data.
                if stat['added']:
                    worksheet.write(row,col,'%s (%s)'
                        % (stat['added'], stat['emails']))
            col += 1
        row += 1

workbook.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.